APIUtilityVerifier BooleanVerifier
@Test public void testUnregistrationReturnValue(){
  // Unregistering the very handler that was registered under the same
  // identifier must be reported as successful by unregister().
  RefreshHandler handler=Mockito.mock(RefreshHandler.class);
  RefreshRegistry registry=RefreshRegistry.defaultRegistry();
  registry.register("test",handler);
  assertTrue(registry.unregister("test",handler));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRefresh() throws Exception {
  // Precondition: the mock call queue is in place and receiving puts.
  assertTrue("Mock queue should have been constructed",mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue",canPutInMockQueue());
  int constructionsBefore=mockQueueConstructions;

  // Fire the refresh through the admin tool and expect a zero exit code.
  DFSAdmin admin=new DFSAdmin(config);
  int exitCode=admin.run(new String[]{"-refreshCallQueue"});
  assertEquals("DFSAdmin should return 0",0,exitCode);

  // The refresh must not have built another mock queue instance...
  assertEquals("Mock queue should have no additional constructions",constructionsBefore,mockQueueConstructions);

  // ...and puts should now land in the default queue rather than the mock.
  try {
    boolean routedThroughMock=canPutInMockQueue();
    assertFalse("Puts are routed through LBQ instead of MockQueue",routedThroughMock);
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
@Before @Override public void setUp() throws Exception {
  super.setUp();
  // Use the HDFS policy provider and replication factor 1 for the mini cluster.
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,HDFSPolicyProvider.class,PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);

  // Bring up a three-datanode cluster and block until it is operational.
  dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dfsCluster.waitClusterUp();

  username=System.getProperty("user.name");
  namenode=conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
  fs=dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
@Before @Override public void setUp() throws Exception {
  super.setUp();
  // Use the HDFS policy provider and replication factor 1 for the mini cluster.
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,HDFSPolicyProvider.class,PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);

  // Point the key provider at a fresh JavaKeyStore under a unique temp dir.
  tmpDir=new File(System.getProperty("test.build.data","target"),UUID.randomUUID().toString()).getAbsoluteFile();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks");

  // Single-datanode cluster is enough for the key-related tests.
  dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey",conf);

  username=System.getProperty("user.name");
  namenode=conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
  fs=dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
}
APIUtilityVerifier TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
@Before @Override public void setUp() throws Exception {
  super.setUp();
  // Use the HDFS policy provider and replication factor 1 for the mini cluster.
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,HDFSPolicyProvider.class,PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);

  // Eight datanodes spread over four racks so topology-aware tests have
  // something non-trivial to work with.
  String[] racks={"/rack1","/rack1","/rack2","/rack2","/rack2","/rack3","/rack4","/rack4"};
  String[] hosts={"host1","host2","host3","host4","host5","host6","host7","host8"};
  dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).racks(racks).hosts(hosts).build();
  dfsCluster.waitClusterUp();

  username=System.getProperty("user.name");
  namenode=conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
  fs=dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
@Before @Override public void setUp() throws Exception {
  super.setUp();
  // Enable xattrs, use the HDFS policy provider, and replication factor 1.
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,HDFSPolicyProvider.class,PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);

  // A single datanode suffices for the xattr tests.
  dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();

  username=System.getProperty("user.name");
  namenode=conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
  fs=dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
}
BooleanVerifier
/**
 * Render the test configuration as JSON via ConfServlet and verify that the
 * programmatically-set test property appears with the expected value and
 * resource attribution.
 */
@Test @SuppressWarnings("unchecked") public void testWriteJson() throws Exception {
  StringWriter sw=new StringWriter();
  ConfServlet.writeResponse(getTestConf(),sw,"json");
  String json=sw.toString();
  boolean foundSetting=false;
  Object parsed=JSON.parse(json);
  // FIX: Map.get returns Object; an explicit cast to Object[] is required
  // for this to compile (the cast had been dropped).
  Object[] properties=(Object[])((Map)parsed).get("properties");
  for (Object o : properties) {
    Map propertyInfo=(Map)o;
    String key=(String)propertyInfo.get("key");
    String val=(String)propertyInfo.get("value");
    String resource=(String)propertyInfo.get("resource");
    System.err.println("k: " + key + " v: "+ val+ " r: "+ resource);
    // NOTE: "programatically" [sic] is the literal resource string the
    // servlet emits; do not correct the spelling here.
    if (TEST_KEY.equals(key) && TEST_VAL.equals(val) && "programatically".equals(resource)) {
      foundSetting=true;
    }
  }
  assertTrue(foundSetting);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testWriteXml() throws Exception {
  // Render the test configuration as XML and parse it back with DOM.
  StringWriter writer=new StringWriter();
  ConfServlet.writeResponse(getTestConf(),writer,"xml");
  String xml=writer.toString();
  DocumentBuilder builder=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document doc=builder.parse(new InputSource(new StringReader(xml)));

  // Scan every <name> element looking for the test key; when found, its
  // sibling <value> must carry the expected test value.
  NodeList names=doc.getElementsByTagName("name");
  boolean sawTestKey=false;
  for (int idx=0; idx < names.getLength(); idx++) {
    Node nameNode=names.item(idx);
    String key=nameNode.getTextContent();
    System.err.println("xml key: " + key);
    if (TEST_KEY.equals(key)) {
      sawTestKey=true;
      Element property=(Element)nameNode.getParentNode();
      String value=property.getElementsByTagName("value").item(0).getTextContent();
      assertEquals(TEST_VAL,value);
    }
  }
  assertTrue(sawTestKey);
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testIteratorWithDeprecatedKeys(){
  Configuration conf=new Configuration();
  Configuration.addDeprecation("dK",new String[]{"nK"});

  // Writes through either the deprecated key or its replacement must be
  // visible through both.
  conf.set("k","v");
  conf.set("dK","V");
  assertEquals("V",conf.get("dK"));
  assertEquals("V",conf.get("nK"));
  conf.set("nK","VV");
  assertEquals("VV",conf.get("dK"));
  assertEquals("VV",conf.get("nK"));

  // Iteration must surface the regular key, the deprecated key, and the
  // new key, each carrying the latest value.
  boolean sawRegular=false;
  boolean sawDeprecated=false;
  boolean sawNew=false;
  for ( Map.Entry entry : conf) {
    Object key=entry.getKey();
    if ("k".equals(key)) {
      assertEquals("v",entry.getValue());
      sawRegular=true;
    }
    if ("dK".equals(key)) {
      assertEquals("VV",entry.getValue());
      sawDeprecated=true;
    }
    if ("nK".equals(key)) {
      assertEquals("VV",entry.getValue());
      sawNew=true;
    }
  }
  assertTrue("regular Key not found",sawRegular);
  assertTrue("deprecated Key not found",sawDeprecated);
  assertTrue("new Key not found",sawNew);
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys(){
  Configuration conf=new Configuration();
  // One deprecated key fans out to two replacement keys.
  Configuration.addDeprecation("dK",new String[]{"nK1","nK2"});

  // A write through any of the three aliases must be visible through all.
  conf.set("k","v");
  conf.set("dK","V");
  assertEquals("V",conf.get("dK"));
  assertEquals("V",conf.get("nK1"));
  assertEquals("V",conf.get("nK2"));
  conf.set("nK1","VV");
  assertEquals("VV",conf.get("dK"));
  assertEquals("VV",conf.get("nK1"));
  assertEquals("VV",conf.get("nK2"));
  conf.set("nK2","VVV");
  assertEquals("VVV",conf.get("dK"));
  assertEquals("VVV",conf.get("nK2"));
  assertEquals("VVV",conf.get("nK1"));

  // Iteration must surface all four keys with the most recent value.
  boolean sawRegular=false;
  boolean sawDeprecated=false;
  boolean sawNew1=false;
  boolean sawNew2=false;
  for ( Map.Entry entry : conf) {
    Object key=entry.getKey();
    if ("k".equals(key)) {
      assertEquals("v",entry.getValue());
      sawRegular=true;
    }
    if ("dK".equals(key)) {
      assertEquals("VVV",entry.getValue());
      sawDeprecated=true;
    }
    if ("nK1".equals(key)) {
      assertEquals("VVV",entry.getValue());
      sawNew1=true;
    }
    if ("nK2".equals(key)) {
      assertEquals("VVV",entry.getValue());
      sawNew2=true;
    }
  }
  assertTrue("regular Key not found",sawRegular);
  assertTrue("deprecated Key not found",sawDeprecated);
  assertTrue("new Key 1 not found",sawNew1);
  assertTrue("new Key 2 not found",sawNew2);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testProfileParamsDefaults(){
  // Default profiling params must enable hprof with a per-task output file
  // placeholder ("file=%s").
  String params=new JobConf().getProfileParams();
  Assert.assertNotNull(params);
  Assert.assertTrue(params.startsWith("-agentlib:hprof"));
  Assert.assertTrue(params.contains("file=%s"));
}
BooleanVerifier
/**
 * Ensure that by default JobContext.MAX_TASK_FAILURES_PER_TRACKER is less
 * than both JobContext.MAP_MAX_ATTEMPTS and JobContext.REDUCE_MAX_ATTEMPTS,
 * so that failed tasks will be retried on other nodes.
 */
@Test public void testMaxTaskFailuresPerTracker(){
  JobConf jobConf=new JobConf(true);
  int perTracker=jobConf.getMaxTaskFailuresPerTracker();
  boolean belowBothLimits=perTracker < jobConf.getMaxMapAttempts()
      && perTracker < jobConf.getMaxReduceAttempts();
  Assert.assertTrue("By default JobContext.MAX_TASK_FAILURES_PER_TRACKER was "
      + "not less than JobContext.MAP_MAX_ATTEMPTS and REDUCE_MAX_ATTEMPTS",
      belowBothLimits);
}
InternalCallVerifier BooleanVerifier
/**
 * Test reconfiguring a Reconfigurable.
 *
 * Verifies the initial property values, which properties are declared
 * reconfigurable, that reconfigurable properties accept new values
 * (including null = unset), and that non-reconfigurable properties are
 * rejected with a ReconfigurationException.
 */
@Test public void testReconfigure(){
  ReconfigurableDummy dummy=new ReconfigurableDummy(conf1);

  // Initial values as established by conf1.
  assertTrue(PROP1 + " set to wrong value ",dummy.getConf().get(PROP1).equals(VAL1));
  assertTrue(PROP2 + " set to wrong value ",dummy.getConf().get(PROP2).equals(VAL1));
  assertTrue(PROP3 + " set to wrong value ",dummy.getConf().get(PROP3).equals(VAL1));
  assertTrue(PROP4 + " set to wrong value ",dummy.getConf().get(PROP4) == null);
  assertTrue(PROP5 + " set to wrong value ",dummy.getConf().get(PROP5) == null);

  // Which properties the dummy declares as reconfigurable.
  assertTrue(PROP1 + " should be reconfigurable ",dummy.isPropertyReconfigurable(PROP1));
  assertTrue(PROP2 + " should be reconfigurable ",dummy.isPropertyReconfigurable(PROP2));
  assertFalse(PROP3 + " should not be reconfigurable ",dummy.isPropertyReconfigurable(PROP3));
  assertTrue(PROP4 + " should be reconfigurable ",dummy.isPropertyReconfigurable(PROP4));
  assertFalse(PROP5 + " should not be reconfigurable ",dummy.isPropertyReconfigurable(PROP5));

  // Reconfigurable properties accept same value, null (unset), and new value.
  assertReconfigureSucceeds(dummy,PROP1,VAL1);
  assertReconfigureSucceeds(dummy,PROP1,null);
  assertReconfigureSucceeds(dummy,PROP1,VAL2);
  assertReconfigureSucceeds(dummy,PROP4,null);
  assertReconfigureSucceeds(dummy,PROP4,VAL1);

  // Non-reconfigurable properties must be rejected, whatever the value.
  assertReconfigureFails(dummy,PROP5,null);
  assertReconfigureFails(dummy,PROP5,VAL1);
  assertReconfigureFails(dummy,PROP3,VAL2);
  assertReconfigureFails(dummy,PROP3,null);
}

/**
 * Reconfigure {@code prop} to {@code newVal} (null unsets it) and verify the
 * change took effect without throwing.
 */
private void assertReconfigureSucceeds(ReconfigurableDummy dummy,String prop,String newVal){
  try {
    dummy.reconfigureProperty(prop,newVal);
  }
  catch ( ReconfigurationException e) {
    fail("received unexpected exception reconfiguring " + prop + ": " + e);
  }
  // assertEquals handles both the null (unset) and non-null cases uniformly.
  assertEquals(prop + " set to wrong value ",newVal,dummy.getConf().get(prop));
}

/**
 * Attempt to reconfigure {@code prop} to {@code newVal} and verify that a
 * ReconfigurationException is thrown.
 */
private void assertReconfigureFails(ReconfigurableDummy dummy,String prop,String newVal){
  try {
    dummy.reconfigureProperty(prop,newVal);
    fail("did not receive expected exception reconfiguring " + prop);
  }
  catch ( ReconfigurationException e) {
    // expected: property is not reconfigurable
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * Test ReconfigurationUtil.getChangedProperties.
 *
 * Between conf1 and conf2 exactly three properties differ: PROP2 changed
 * from VAL1 to VAL2, PROP3 was unset, and PROP4 was newly set to VAL1.
 */
@Test public void testGetChangedProperties(){
  Collection<ReconfigurationUtil.PropertyChange> changes=
      ReconfigurationUtil.getChangedProperties(conf2,conf1);
  // assertEquals reports the actual size directly on failure.
  assertEquals("wrong number of changed properties",3,changes.size());
  boolean changeFound=false;
  boolean unsetFound=false;
  boolean setFound=false;
  for ( ReconfigurationUtil.PropertyChange c : changes) {
    if (c.prop.equals(PROP2) && c.oldVal != null && c.oldVal.equals(VAL1) && c.newVal != null && c.newVal.equals(VAL2)) {
      changeFound=true;   // value change VAL1 -> VAL2
    }
    else if (c.prop.equals(PROP3) && c.oldVal != null && c.oldVal.equals(VAL1) && c.newVal == null) {
      unsetFound=true;    // property removed
    }
    else if (c.prop.equals(PROP4) && c.oldVal == null && c.newVal != null && c.newVal.equals(VAL1)) {
      setFound=true;      // property newly added
    }
  }
  // Separate assertions pinpoint which kind of change is missing.
  assertTrue("value change not found",changeFound);
  assertTrue("property unset not found",unsetFound);
  assertTrue("property set not found",setFound);
}
InternalCallVerifier BooleanVerifier
/**
 * Test whether configuration changes are visible in another thread.
 *
 * NOTE(review): timing-sensitive — relies on the 500ms startup sleep and the
 * 2s wait loop being generous enough on the test machine.
 */
@Test public void testThread() throws ReconfigurationException {
ReconfigurableDummy dummy=new ReconfigurableDummy(conf1);
assertTrue(dummy.getConf().get(PROP1).equals(VAL1));
// Run the dummy in its own thread; presumably it loops until it observes
// the reconfigured value and then exits — TODO confirm in ReconfigurableDummy.
Thread dummyThread=new Thread(dummy);
dummyThread.start();
// Give the dummy thread a moment to start before reconfiguring.
try {
Thread.sleep(500);
}
catch ( InterruptedException ignore) {
}
// Change PROP1 from this thread; the change should become visible to the
// dummy thread.
dummy.reconfigureProperty(PROP1,VAL2);
// Wait up to 2 seconds for the dummy thread to notice the change and die.
long endWait=Time.now() + 2000;
while (dummyThread.isAlive() && Time.now() < endWait) {
try {
Thread.sleep(50);
}
catch ( InterruptedException ignore) {
}
}
assertFalse("dummy thread should not be alive",dummyThread.isAlive());
// Stop flag plus join: clean up the thread even if the assertion above
// did not fire.
dummy.running=false;
try {
dummyThread.join();
}
catch ( InterruptedException ignore) {
}
// The new value must also be visible from the test thread.
assertTrue(PROP1 + " is set to wrong value",dummy.getConf().get(PROP1).equals(VAL2));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 *
 * NOTE(review): the statement order below is load-bearing (bookie lifecycle
 * vs. namenode transitions); do not reorder.
 */
@Test public void testFailoverWithFailingBKCluster() throws Exception {
// Ensemble needs every bookie, so killing one makes writes impossible.
int ensembleSize=numBookies + 1;
BookieServer newBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
BookieServer replacementBookie=null;
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
// Quorum == ensemble: the journal requires all bookies to be up.
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the NN is expected to call exit when the
// shared journal fails; don't let that kill the test JVM.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
Path p1=new Path("/testBKJMFailingBKCluster1");
Path p2=new Path("/testBKJMFailingBKCluster2");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// p1 is written while BK is healthy...
fs.mkdirs(p1);
// ...then a bookie dies, making the shared journal unwritable.
newBookie.shutdown();
assertEquals("New bookie didn't stop",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
fs.mkdirs(p2);
fail("mkdirs should result in the NN exiting");
}
 catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
cluster.shutdownNameNode(0);
// The standby cannot take over while the journal is still down.
try {
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
}
 catch ( ExitException ee) {
assertTrue("Should shutdown due to required journal failure",ee.getMessage().contains("starting log segment 3 failed for required journal"));
}
// Once a replacement bookie restores the ensemble, failover succeeds.
replacementBookie=bkutil.newBookie();
assertEquals("Replacement bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
cluster.transitionToActive(1);
// Pre-failure write survived; post-failure write never made it.
assertTrue(fs.exists(p1));
assertFalse(fs.exists(p2));
}
  finally {
newBookie.shutdown();
if (replacementBookie != null) {
replacementBookie.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Use NameNode INTIALIZESHAREDEDITS to initialize the shared edits. i.e. copy
 * the edits log segments to new bkjm shared edits.
 *
 * NOTE(review): order matters — the cluster is started with file-based shared
 * edits, those are destroyed, and only then is BKJM configured and
 * initializeSharedEdits invoked.
 * @throws Exception
 */
@Test public void testInitializeBKSharedEdits() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
HAUtil.setAllowStandbyReads(conf,true);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSNNTopology topology=MiniDFSNNTopology.simpleHATopology();
cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
cluster.waitActive();
// Stop the namenodes and wipe the (file-based) shared edits dir so the
// namenodes can no longer start without re-initialization.
cluster.shutdownNameNodes();
File shareddir=new File(cluster.getSharedEditsDir(0,1));
assertTrue("Initial Shared edits dir not fully deleted",FileUtil.fullyDelete(shareddir));
assertCanNotStartNamenode(cluster,0);
assertCanNotStartNamenode(cluster,1);
// Repoint both namenodes at the same BKJM journal URI.
Configuration nn1Conf=cluster.getConfiguration(0);
Configuration nn2Conf=cluster.getConfiguration(1);
nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/initializeSharedEdits").toString());
nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/initializeSharedEdits").toString());
BKJMUtil.addJournalManagerDefinition(nn1Conf);
BKJMUtil.addJournalManagerDefinition(nn2Conf);
// initializeSharedEdits returns false on success (no error aborted it).
assertFalse(NameNode.initializeSharedEdits(nn1Conf));
// Both namenodes must now come up against the BKJM shared edits.
assertCanStartHANameNodes(cluster,conf,"/testBKJMInitialize");
}
  finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test simple HA failover usecase with BK.
 *
 * Writes a directory while NN0 is active, kills NN0, fails over to NN1, and
 * verifies the write is visible — i.e. the edits made it through the shared
 * BookKeeper journal.
 */
@Test public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).build();
    // FIX: removed unused locals nn1/nn2 (plain getters, never referenced).
    cluster.waitActive();
    cluster.transitionToActive(0);

    // Write through the failover-aware client while NN0 is active.
    Path p=new Path("/testBKJMfailover");
    FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
    fs.mkdirs(p);

    // Fail over: the directory must be visible on the new active NN.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    assertTrue(fs.exists(p));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that two namenodes can't continue as primary.
 *
 * After NN1 becomes active, the old active (NN0) must be fenced out of the
 * shared journal: its next edit attempt should make it exit.
 */
@Test public void testMultiplePrimariesStarted() throws Exception {
Path p1=new Path("/testBKJMMultiplePrimary");
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the fenced NN is expected to call exit;
// don't let that kill the test JVM.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
fs.mkdirs(p1);
// Roll the log on NN0, then make NN1 active without shutting NN0 down —
// both now believe they can write.
nn1.getRpcServer().rollEditLog();
cluster.transitionToActive(1);
// Talk directly to the *old* active (index 0) to force it to write.
fs=cluster.getFileSystem(0);
try {
fs.delete(p1,true);
fail("Log update on older active should cause it to exit");
}
 catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
}
  finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that bkjm will refuse open a stream on an empty
 * ledger.
 *
 * Both firstBookKeeperEntry values tried (-1 and 0) are invalid for a
 * ledger that contains no entries.
 */
@Test public void testEmptyInputStream() throws Exception {
ZooKeeper zk=BKJMUtil.connectZooKeeper();
BookKeeper bkc=new BookKeeper(new ClientConfiguration(),zk);
try {
// Create and immediately close a ledger: it exists but has no entries.
LedgerHandle lh=bkc.createLedger(BookKeeper.DigestType.CRC32,"foobar".getBytes());
lh.close();
EditLogLedgerMetadata metadata=new EditLogLedgerMetadata("/foobar",HdfsConstants.NAMENODE_LAYOUT_VERSION,lh.getId(),0x1234);
try {
// firstBookKeeperEntry = -1 must be rejected.
new BookKeeperEditLogInputStream(lh,metadata,-1);
fail("Shouldn't get this far, should have thrown");
}
 catch ( IOException ioe) {
assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
}
// Fresh metadata object for the second attempt (same ledger).
metadata=new EditLogLedgerMetadata("/foobar",HdfsConstants.NAMENODE_LAYOUT_VERSION,lh.getId(),0x1234);
try {
// firstBookKeeperEntry = 0 must also be rejected on an empty ledger.
new BookKeeperEditLogInputStream(lh,metadata,0);
fail("Shouldn't get this far, should have thrown");
}
 catch ( IOException ioe) {
assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
}
}
  finally {
bkc.close();
zk.close();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that if enough bookies fail to prevent an ensemble,
 * writes the bookkeeper will fail. Test that when once again
 * an ensemble is available, it can continue to write.
 *
 * NOTE(review): ensemble == quorum == numBookies + 1, so losing any single
 * bookie makes the ensemble impossible to form.
 */
@Test public void testAllBookieFailure() throws Exception {
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
Configuration conf=new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),nsi);
bkjm.format(nsi);
// Phase 1: with all bookies up, writing and flushing succeeds.
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Phase 2: kill one bookie; the next flush must fail.
bookieToFail.shutdown();
assertEquals("New bookie didn't die",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
fail("should not get to this stage");
}
 catch ( IOException ioe) {
LOG.debug("Error writing to bookkeeper",ioe);
assertTrue("Invalid exception message",ioe.getMessage().contains("Failed to write to bookkeeper"));
}
// Phase 3: bring up a replacement bookie, recover, and verify a new
// segment can be written again.
replacementBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",numBookies + 1,bkutil.checkBookiesUp(numBookies + 1,10));
bkjm.recoverUnfinalizedSegments();
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
 catch ( Exception e) {
// Log before rethrowing so the failure appears in the test output.
LOG.error("Exception in test",e);
throw e;
}
 finally {
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
BooleanVerifier
/**
* Tests that concurrent calls to format will still allow one to succeed.
*/
@Test public void testConcurrentFormat() throws Exception {
final URI uri=BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
final NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
bkjm.format(nsi);
for (int i=1; i < 100 * 2; i+=2) {
bkjm.startLogSegment(i,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
bkjm.finalizeLogSegment(i,i + 1);
}
bkjm.close();
final int numThreads=40;
List> threads=new ArrayList>();
final CyclicBarrier barrier=new CyclicBarrier(numThreads);
for (int i=0; i < numThreads; i++) {
threads.add(new Callable(){
public ThreadStatus call(){
BookKeeperJournalManager bkjm=null;
try {
bkjm=new BookKeeperJournalManager(conf,uri,nsi);
barrier.await();
bkjm.format(nsi);
return ThreadStatus.COMPLETED;
}
catch ( IOException ioe) {
LOG.info("Exception formatting ",ioe);
return ThreadStatus.GOODEXCEPTION;
}
catch ( InterruptedException ie) {
LOG.error("Interrupted. Something is broken",ie);
Thread.currentThread().interrupt();
return ThreadStatus.BADEXCEPTION;
}
catch ( Exception e) {
LOG.error("Some other bad exception",e);
return ThreadStatus.BADEXCEPTION;
}
finally {
if (bkjm != null) {
try {
bkjm.close();
}
catch ( IOException ioe) {
LOG.error("Error closing journal manager",ioe);
}
}
}
}
}
);
}
ExecutorService service=Executors.newFixedThreadPool(numThreads);
List> statuses=service.invokeAll(threads,60,TimeUnit.SECONDS);
int numCompleted=0;
for ( Future s : statuses) {
assertTrue(s.isDone());
assertTrue("Thread threw invalid exception",s.get() == ThreadStatus.COMPLETED || s.get() == ThreadStatus.GOODEXCEPTION);
if (s.get() == ThreadStatus.COMPLETED) {
numCompleted++;
}
}
LOG.info("Completed " + numCompleted + " formats");
assertTrue("No thread managed to complete formatting",numCompleted > 0);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * If a journal manager has an corrupt inprogress node, ensure that we throw
 * an error, as this should not be possible, and some third party has
 * corrupted the zookeeper state
 */
@Test public void testCorruptInprogressNode() throws Exception {
  URI uri=BKJMUtil.createJournalURI("/hdfsjournal-corruptInprogress");
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  bkjm.format(nsi);

  // Write a finalized segment [1,100] followed by an empty in-progress
  // segment starting at 101.
  // FIX: removed a stray empty statement (";") left after this declaration.
  EditLogOutputStream out=bkjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i=1; i <= 100; i++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1,100);
  out=bkjm.startLogSegment(101,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();

  // Corrupt the in-progress znode behind the journal manager's back.
  String inprogressZNode=bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode,"WholeLottaJunk".getBytes(),-1);

  // Recovery must refuse to parse the corrupted metadata.
  bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating" + " an empty inprogess znode");
  }
  catch ( IOException e) {
    assertTrue("Exception different than expected",e.getMessage().contains("has no field named"));
  }
  finally {
    bkjm.close();
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * If a journal manager has an empty inprogress node, ensure that we throw an
 * error, as this should not be possible, and some third party has corrupted
 * the zookeeper state
 */
@Test public void testEmptyInprogressNode() throws Exception {
  URI uri=BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  bkjm.format(nsi);

  // Write a finalized segment [1,100] followed by an empty in-progress
  // segment starting at 101.
  // FIX: removed a stray empty statement (";") left after this declaration.
  EditLogOutputStream out=bkjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i=1; i <= 100; i++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1,100);
  out=bkjm.startLogSegment(101,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();

  // Blank out the in-progress znode behind the journal manager's back.
  String inprogressZNode=bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode,new byte[0],-1);

  // Recovery must refuse to proceed on the empty metadata.
  bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating" + " an empty inprogess znode");
  }
  catch ( IOException e) {
    assertTrue("Exception different than expected",e.getMessage().contains("Invalid/Incomplete data in znode"));
  }
  finally {
    bkjm.close();
  }
}
TestCleaner BooleanVerifier HybridVerifier
/**
 * Restore write permission on the test base directory and remove it entirely
 * after each test, verifying the deletion actually happened.
 */
@After public void cleanUp() throws IOException {
  FileUtil.setWritable(base,true);
  FileUtil.fullyDelete(base);
  // FIX: assertFalse(x) instead of assertTrue(!x).
  assertFalse(base.exists());
}
UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * getInstance must return a cipher for a valid transformation and reject
 * unknown algorithms and unknown paddings with the appropriate exceptions.
 * Skipped when the native OpenSSL library failed to load.
 */
@Test(timeout=120000) public void testGetInstance() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher=OpensslCipher.getInstance("AES/CTR/NoPadding");
  // FIX: assertNotNull instead of assertTrue(cipher != null).
  Assert.assertNotNull(cipher);
  try {
    cipher=OpensslCipher.getInstance("AES2/CTR/NoPadding");
    Assert.fail("Should specify correct algorithm.");
  }
  catch ( NoSuchAlgorithmException e) {
    // expected: "AES2" is not a valid algorithm
  }
  try {
    cipher=OpensslCipher.getInstance("AES/CTR/NoPadding2");
    Assert.fail("Should specify correct padding.");
  }
  catch ( NoSuchPaddingException e) {
    // expected: "NoPadding2" is not a valid padding
  }
}
UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * doFinal must reject a non-direct output buffer with an
 * IllegalArgumentException. Skipped when native OpenSSL failed to load.
 */
@Test(timeout=120000) public void testDoFinalArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher=OpensslCipher.getInstance("AES/CTR/NoPadding");
  // FIX: assertNotNull instead of assertTrue(cipher != null).
  Assert.assertNotNull(cipher);
  cipher.init(OpensslCipher.ENCRYPT_MODE,key,iv);
  // Heap (non-direct) buffer: the native layer requires a direct buffer.
  ByteBuffer output=ByteBuffer.allocate(1024);
  try {
    cipher.doFinal(output);
    Assert.fail("Output buffer should be direct buffer.");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Direct buffer is required",e);
  }
}
UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * update must reject non-direct buffers with IllegalArgumentException and an
 * undersized output buffer with ShortBufferException. Skipped when native
 * OpenSSL failed to load.
 */
@Test(timeout=120000) public void testUpdateArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher=OpensslCipher.getInstance("AES/CTR/NoPadding");
  // FIX: assertNotNull instead of assertTrue(cipher != null).
  Assert.assertNotNull(cipher);
  cipher.init(OpensslCipher.ENCRYPT_MODE,key,iv);
  // Heap (non-direct) buffers: the native layer requires direct buffers.
  ByteBuffer input=ByteBuffer.allocate(1024);
  ByteBuffer output=ByteBuffer.allocate(1024);
  try {
    cipher.update(input,output);
    Assert.fail("Input and output buffer should be direct buffer.");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Direct buffers are required",e);
  }
  // Direct buffers, but the output (1000) is smaller than the input (1024).
  input=ByteBuffer.allocateDirect(1024);
  output=ByteBuffer.allocateDirect(1000);
  try {
    cipher.update(input,output);
    Assert.fail("Output buffer length should be sufficient " + "to store output data");
  }
  catch ( ShortBufferException e) {
    GenericTestUtils.assertExceptionContains("Output buffer is not sufficient",e);
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getBaseName must strip the "@version" suffix and reject names that carry
 * no version separator with an IOException.
 */
@Test public void testParseVersionName() throws Exception {
  assertEquals("/a/b",KeyProvider.getBaseName("/a/b@3"));
  assertEquals("/aaa",KeyProvider.getBaseName("/aaa@112"));
  try {
    KeyProvider.getBaseName("no-slashes");
    // FIX: fail(...) instead of assertTrue("…", false).
    fail("should have thrown");
  }
  catch ( IOException e) {
    // expected: a name without a version separator is invalid
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trip KeyProvider.Metadata through serialize()/deserialize, with and
 * without description/attributes, and verify addVersion bumps only the copy.
 */
@Test public void testMetadata() throws Exception {
  // FIX: pattern was "y/m/d" — lowercase 'm' means minute-of-hour, not
  // month; 'M' is month, which is clearly the intent for "2013/12/25".
  DateFormat format=new SimpleDateFormat("y/M/d");
  Date date=format.parse("2013/12/25");
  KeyProvider.Metadata meta=new KeyProvider.Metadata("myCipher",100,null,null,date,123);
  assertEquals("myCipher",meta.getCipher());
  assertEquals(100,meta.getBitLength());
  assertNull(meta.getDescription());
  assertEquals(date,meta.getCreated());
  assertEquals(123,meta.getVersions());

  // Serialize/deserialize round trip must preserve every field; a null
  // attributes map comes back as an empty one.
  KeyProvider.Metadata second=new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(),second.getCipher());
  assertEquals(meta.getBitLength(),second.getBitLength());
  assertNull(second.getDescription());
  assertTrue(second.getAttributes().isEmpty());
  assertEquals(meta.getCreated(),second.getCreated());
  assertEquals(meta.getVersions(),second.getVersions());

  // addVersion returns the version being assigned and bumps only the copy.
  int newVersion=second.addVersion();
  assertEquals(123,newVersion);
  assertEquals(124,second.getVersions());
  assertEquals(123,meta.getVersions());

  // Same round trip, now with a description and attributes present.
  format=new SimpleDateFormat("y/M/d");
  date=format.parse("2013/12/25");
  // FIX: raw HashMap replaced with the parameterized type.
  Map<String,String> attributes=new HashMap<String,String>();
  attributes.put("a","A");
  meta=new KeyProvider.Metadata("myCipher",100,"description",attributes,date,123);
  assertEquals("myCipher",meta.getCipher());
  assertEquals(100,meta.getBitLength());
  assertEquals("description",meta.getDescription());
  assertEquals(attributes,meta.getAttributes());
  assertEquals(date,meta.getCreated());
  assertEquals(123,meta.getVersions());
  second=new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(),second.getCipher());
  assertEquals(meta.getBitLength(),second.getBitLength());
  assertEquals(meta.getDescription(),second.getDescription());
  assertEquals(meta.getAttributes(),second.getAttributes());
  assertEquals(meta.getCreated(),second.getCreated());
  assertEquals(meta.getVersions(),second.getVersions());
  newVersion=second.addVersion();
  assertEquals(123,newVersion);
  assertEquals(124,second.getVersions());
  assertEquals(123,meta.getVersions());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * JavaKeyStoreProvider behavior: keystore file permissions, and recovery
 * from the _OLD/_NEW backup files left behind by an interrupted flush.
 * The provider must prefer a usable current/_NEW file and clean up stale
 * backups; a current file coexisting with _NEW is a corruption signal.
 */
@Test public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
  File file = new File(tmpDir, "test.jks");
  file.delete();  // start from a clean slate; ignore result if absent
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  // was assertTrue(….equals(…)) — assertEquals reports both values on failure
  assertEquals("rwx------", s.getPermission().toString());
  assertTrue(file + " should exist", file.isFile());
  // Simulate an interrupted flush: only the _OLD backup survives.
  File oldFile = new File(file.getPath() + "_OLD");
  file.renameTo(oldFile);
  file.delete();
  file.createNewFile();
  assertTrue(oldFile.exists());
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  assertTrue(file.exists());
  // was assertTrue(oldFile+"should be deleted", !…) — message lacked a space
  assertFalse(oldFile + " should be deleted", oldFile.exists());
  verifyAfterReload(file, provider);
  assertFalse(oldFile.exists());
  // A _NEW file alongside the current file means corruption: must fail.
  File newFile = new File(file.getPath() + "_NEW");
  newFile.createNewFile();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.fail("_NEW and current file should not exist together !!");
  } catch (Exception e) {
    // expected: provider refuses the ambiguous state
  } finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  // Only _NEW present: provider must load from it and remove backups.
  file.renameTo(newFile);
  file.delete();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  } catch (Exception e) {
    Assert.fail("JKS should load from _NEW file !!");
  }
  verifyAfterReload(file, provider);
  // _NEW empty and _OLD present: provider must fall back to _OLD.
  newFile.createNewFile();
  file.renameTo(oldFile);
  file.delete();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  } catch (Exception e) {
    Assert.fail("JKS should load from _OLD file !!");
  } finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  verifyAfterReload(file, provider);
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A syntactically invalid provider URI must surface an IOException whose
 * message names the offending configuration key and value.
 */
@Test public void testUriErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unkn@own:/x/y");
  try {
    KeyProviderFactory.getProviders(conf);  // unused binding removed
    fail("should throw!");  // was assertTrue("should throw!", false)
  } catch (IOException e) {
    assertEquals("Bad configuration of " + KeyProviderFactory.KEY_PROVIDER_PATH + " at unkn@own:/x/y", e.getMessage());
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A well-formed URI whose scheme matches no registered factory must fail
 * with an IOException identifying the scheme and the config key.
 */
@Test public void testFactoryErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unknown:///");
  try {
    KeyProviderFactory.getProviders(conf);  // unused binding removed
    fail("should throw!");  // was assertTrue("should throw!", false)
  } catch (IOException e) {
    assertEquals("No KeyProviderFactory for unknown:/// in " + KeyProviderFactory.KEY_PROVIDER_PATH, e.getMessage());
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a key against a bogus provider scheme must exit with 1 and
 * report that no valid KeyProviders are configured.
 */
@Test public void testInvalidProvider() throws Exception {
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-cipher", "AES", "-provider", "sdff://file/tmp/keystore.jceks"};
  int exitCode = shell.run(createArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid " + "KeyProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A 56-bit key size is invalid for the default cipher; the shell must
 * exit with 1 and report that the key was not created.
 */
@Test public void testInvalidKeySize() throws Exception {
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-size", "56", "-provider", jceksProvider};
  int exitCode = shell.run(createArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When only the transient (user:///) provider is configured, key
 * creation must fail: transient providers are not persisted, so the
 * shell reports that no valid KeyProviders are configured.
 */
@Test public void testTransientProviderOnlyConfig() throws Exception {
  Configuration config = new Configuration();
  config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
  KeyShell shell = new KeyShell();
  shell.setConf(config);
  int exitCode = shell.run(new String[]{"create", "key1"});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid " + "KeyProviders configured."));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end KeyShell lifecycle: create a key, see it in both terse and
 * verbose listings, roll it to a new version, then delete it and
 * confirm it no longer appears in the listing.
 */
@Test public void testKeySuccessfulKeyLifecycle() throws Exception {
int rc=0;
String keyName="key1";
KeyShell ks=new KeyShell();
ks.setConf(new Configuration());
outContent.reset();
// Create.
final String[] args1={"create",keyName,"-provider",jceksProvider};
rc=ks.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created"));
// Terse listing shows the name; verbose adds description/created fields.
String listOut=listKeys(ks,false);
assertTrue(listOut.contains(keyName));
listOut=listKeys(ks,true);
assertTrue(listOut.contains(keyName));
assertTrue(listOut.contains("description"));
assertTrue(listOut.contains("created"));
outContent.reset();
// Roll to a new key version.
final String[] args2={"roll",keyName,"-provider",jceksProvider};
rc=ks.run(args2);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("key1 has been successfully " + "rolled."));
// Delete and verify the key is gone from the listing.
deleteKey(ks,keyName);
listOut=listKeys(ks,false);
assertFalse(listOut,listOut.contains(keyName));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Key attribute handling in KeyShell: a valid "-attr foo=bar" is stored
 * and listed; malformed attributes (empty key, missing '=', bare '=')
 * are rejected; values may themselves contain '='; whitespace around
 * keys/values is trimmed; and duplicate attribute keys are rejected.
 */
@Test public void testAttributes() throws Exception {
int rc;
KeyShell ks=new KeyShell();
ks.setConf(new Configuration());
// Valid attribute is stored and shows up in the verbose listing.
final String[] args1={"create","keyattr1","-provider",jceksProvider,"-attr","foo=bar"};
rc=ks.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created"));
String listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));
outContent.reset();
// "=bar": empty attribute key must be rejected.
final String[] args2={"create","keyattr2","-provider",jceksProvider,"-attr","=bar"};
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// args2[5] is the -attr value; "foo" has no '=' and must be rejected.
args2[5]="foo";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// Bare "=" (empty key and value) must be rejected.
args2[5]="=";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// "a=b=c": split on the first '=' only, so the value is "b=c".
args2[5]="a=b=c";
rc=ks.run(args2);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));
outContent.reset();
// Multiple attributes; surrounding whitespace is trimmed.
final String[] args3={"create","keyattr3","-provider",jceksProvider,"-attr","foo = bar","-attr"," glarch =baz ","-attr","abc=def"};
rc=ks.run(args3);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));
outContent.reset();
// Duplicate attribute key ("foo" twice) must be rejected.
final String[] args4={"create","keyattr4","-provider",jceksProvider,"-attr","foo=bar","-attr","foo=glarch"};
rc=ks.run(args4);
assertEquals(1,rc);
// Clean up the keys this test created.
deleteKey(ks,"keyattr1");
deleteKey(ks,"keyattr2");
deleteKey(ks,"keyattr3");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unrecognized cipher name ("LJM") must make key creation fail with
 * exit code 1 and a "has not been created" message.
 */
@Test public void testInvalidCipher() throws Exception {
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-cipher", "LJM", "-provider", jceksProvider};
  int exitCode = shell.run(createArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a key with "-description" must succeed and surface the
 * supplied description in the verbose key listing.
 */
@Test public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-provider", jceksProvider, "-description", "someDescription"};
  int exitCode = shell.run(createArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully " + "created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writing to the transient user:/// provider succeeds but must print a
 * warning that the modification will not be persisted.
 */
@Test public void testTransientProviderWarning() throws Exception {
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-cipher", "AES", "-provider", "user:///"};
  int exitCode = shell.run(createArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A fully qualified cipher spec (algorithm/mode/padding) is accepted;
 * the key is created and then cleaned up.
 */
@Test public void testFullCipher() throws Exception {
  final String keyName = "key1";
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding", "-provider", jceksProvider};
  int exitCode = shell.run(createArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created"));
  deleteKey(shell, keyName);
}
BooleanVerifier
/**
 * Test the configurable timeout in the KMSClientProvider. Open up a
 * socket, but don't accept connections for it. This leads to a timeout
 * when the KMS client attempts to connect. All three client operations
 * (getKeys, generateEncryptedKey, decryptEncryptedKey) must time out.
 * @throws Exception
 */
@Test public void testKMSTimeout() throws Exception {
  File confDir = getTestDir();
  Configuration conf = createBaseKMSConf(confDir);
  conf.setInt(KMSClientProvider.TIMEOUT_ATTR, 1);
  writeConf(confDir, conf);
  ServerSocket sock;
  int port;
  try {
    sock = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
    port = sock.getLocalPort();
  } catch (Exception e) {
    // Could not open a local socket; nothing to test against.
    return;
  }
  try {  // BUG FIX: close the socket even if an assertion fails below
    URL url = new URL("http://localhost:" + port + "/kms");
    URI uri = createKMSUri(url);
    boolean caughtTimeout = false;
    try {
      KeyProvider kp = new KMSClientProvider(uri, conf);
      kp.getKeys();
    } catch (SocketTimeoutException e) {
      caughtTimeout = true;
    } catch (IOException e) {
      Assert.fail("Caught unexpected exception" + e.toString());
    }
    // BUG FIX: the original reset caughtTimeout here without ever
    // asserting it, so a non-timing-out getKeys() went unnoticed.
    Assert.assertTrue(caughtTimeout);
    caughtTimeout = false;
    try {
      KeyProvider kp = new KMSClientProvider(uri, conf);
      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp).generateEncryptedKey("a");
    } catch (SocketTimeoutException e) {
      caughtTimeout = true;
    } catch (IOException e) {
      Assert.fail("Caught unexpected exception" + e.toString());
    }
    // BUG FIX: same missing assertion for the generateEncryptedKey path.
    Assert.assertTrue(caughtTimeout);
    caughtTimeout = false;
    try {
      KeyProvider kp = new KMSClientProvider(uri, conf);
      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp).decryptEncryptedKey(new KMSClientProvider.KMSEncryptedKeyVersion("a", "a", new byte[]{1, 2}, "EEK", new byte[]{1, 2}));
    } catch (SocketTimeoutException e) {
      caughtTimeout = true;
    } catch (IOException e) {
      Assert.fail("Caught unexpected exception" + e.toString());
    }
    Assert.assertTrue(caughtTimeout);
  } finally {
    sock.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Full KMSClientProvider round-trip against an embedded KMS server:
 * key creation, version/metadata retrieval, rolling, EEK generate and
 * decrypt, deletion, option handling (description/attributes), and
 * delegation-token acquisition. Runs inside runServer() so the client
 * talks to a live KMS instance.
 */
@Test public void testKMSProvider() throws Exception {
Configuration conf=new Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir=getTestDir();
conf=createBaseKMSConf(confDir);
writeConf(confDir,conf);
runServer(null,null,confDir,new KMSCallable(){
@Override public Void call() throws Exception {
Date started=new Date();
Configuration conf=new Configuration();
URI uri=createKMSUri(getKMSUrl());
KeyProvider kp=new KMSClientProvider(uri,conf);
// A fresh server holds no keys and no metadata.
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
// Create "k1" and verify the returned version and material.
KeyProvider.Options options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
KeyProvider.KeyVersion kv0=kp.createKey("k1",options);
Assert.assertNotNull(kv0);
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
KeyProvider.KeyVersion kv1=kp.getKeyVersion(kv0.getVersionName());
Assert.assertEquals(kv0.getVersionName(),kv1.getVersionName());
Assert.assertNotNull(kv1.getMaterial());
KeyProvider.KeyVersion cv1=kp.getCurrentKey("k1");
Assert.assertEquals(kv0.getVersionName(),cv1.getVersionName());
Assert.assertNotNull(cv1.getMaterial());
// Metadata reflects the creation options and a plausible timestamp.
KeyProvider.Metadata m1=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m1.getCipher());
Assert.assertEquals("AES",m1.getAlgorithm());
Assert.assertEquals(128,m1.getBitLength());
Assert.assertEquals(1,m1.getVersions());
Assert.assertNotNull(m1.getCreated());
Assert.assertTrue(started.before(m1.getCreated()));
List lkv1=kp.getKeyVersions("k1");
Assert.assertEquals(1,lkv1.size());
Assert.assertEquals(kv0.getVersionName(),lkv1.get(0).getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// Rolling produces a new version with different material.
KeyProvider.KeyVersion kv2=kp.rollNewVersion("k1");
Assert.assertNotSame(kv0.getVersionName(),kv2.getVersionName());
Assert.assertNotNull(kv2.getMaterial());
kv2=kp.getKeyVersion(kv2.getVersionName());
boolean eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertFalse(eq);
// The current key is now the rolled version.
KeyProvider.KeyVersion cv2=kp.getCurrentKey("k1");
Assert.assertEquals(kv2.getVersionName(),cv2.getVersionName());
Assert.assertNotNull(cv2.getMaterial());
eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertTrue(eq);
// Both versions are listed, oldest first.
List lkv2=kp.getKeyVersions("k1");
Assert.assertEquals(2,lkv2.size());
Assert.assertEquals(kv1.getVersionName(),lkv2.get(0).getVersionName());
Assert.assertNotNull(lkv2.get(0).getMaterial());
Assert.assertEquals(kv2.getVersionName(),lkv2.get(1).getVersionName());
Assert.assertNotNull(lkv2.get(1).getMaterial());
KeyProvider.Metadata m2=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m2.getCipher());
Assert.assertEquals("AES",m2.getAlgorithm());
Assert.assertEquals(128,m2.getBitLength());
Assert.assertEquals(2,m2.getVersions());
Assert.assertNotNull(m2.getCreated());
Assert.assertTrue(started.before(m2.getCreated()));
List ks1=kp.getKeys();
Assert.assertEquals(1,ks1.size());
Assert.assertEquals("k1",ks1.get(0));
KeyProvider.Metadata[] kms1=kp.getKeysMetadata("k1");
Assert.assertEquals(1,kms1.length);
Assert.assertEquals("AES/CTR/NoPadding",kms1[0].getCipher());
Assert.assertEquals("AES",kms1[0].getAlgorithm());
Assert.assertEquals(128,kms1[0].getBitLength());
Assert.assertEquals(2,kms1[0].getVersions());
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// EEK round-trip: generate, then decrypt twice and compare material.
KeyProvider.KeyVersion kv=kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
Assert.assertEquals(kv.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length);
KeyProvider.KeyVersion k1=kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName());
KeyProvider.KeyVersion k1a=kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(),k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length,k1.getMaterial().length);
// A second generated EEK must decrypt to different material.
EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2=kpExt.decryptEncryptedKey(ek2);
boolean isEq=true;
for (int i=0; isEq && i < ek2.getEncryptedKeyVersion().getMaterial().length; i++) {
isEq=k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// Deletion: every lookup for "k1" now returns null/empty.
kp.deleteKey("k1");
Assert.assertNull(kp.getKeyVersion("k1"));
Assert.assertNull(kp.getKeyVersions("k1"));
Assert.assertNull(kp.getMetadata("k1"));
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
// Option combinations: neither / description / attributes / both.
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
kp.createKey("k2",options);
KeyProvider.Metadata meta=kp.getMetadata("k2");
Assert.assertNull(meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
kp.createKey("k3",options);
meta=kp.getMetadata("k3");
Assert.assertEquals("d",meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
Map attributes=new HashMap();
attributes.put("a","A");
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setAttributes(attributes);
kp.createKey("k4",options);
meta=kp.getMetadata("k4");
Assert.assertNull(meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
options.setAttributes(attributes);
kp.createKey("k5",options);
meta=kp.getMetadata("k5");
Assert.assertEquals("d",meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
// Delegation tokens: exactly one kms-dt token for the KMS address.
KeyProviderDelegationTokenExtension kpdte=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
Credentials credentials=new Credentials();
kpdte.addDelegationTokens("foo",credentials);
Assert.assertEquals(1,credentials.getAllTokens().size());
InetSocketAddress kmsAddr=new InetSocketAddress(getKMSUrl().getHost(),getKMSUrl().getPort());
Assert.assertEquals(new Text("kms-dt"),credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind());
return null;
}
}
);
}
BooleanVerifier
/** With an empty configuration every ACL type grants access to any user. */
@Test public void testDefaults(){
  final KMSACLs defaultAcls = new KMSACLs(new Configuration(false));
  for (KMSACLs.Type aclType : KMSACLs.Type.values()) {
    Assert.assertTrue(defaultAcls.hasAccess(aclType, UserGroupInformation.createRemoteUser("foo")));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Configuring each ACL type with its own name as the user list must
 * admit exactly that user and reject everyone else.
 */
@Test public void testCustom(){
  final Configuration conf = new Configuration(false);
  for (KMSACLs.Type aclType : KMSACLs.Type.values()) {
    conf.set(aclType.getConfigKey(), aclType.toString() + " ");
  }
  final KMSACLs acls = new KMSACLs(conf);
  for (KMSACLs.Type aclType : KMSACLs.Type.values()) {
    final UserGroupInformation matching = UserGroupInformation.createRemoteUser(aclType.toString());
    Assert.assertTrue(acls.hasAccess(aclType, matching));
    Assert.assertFalse(acls.hasAccess(aclType, UserGroupInformation.createRemoteUser("foo")));
  }
}
BooleanVerifier
/**
 * An UNAUTHORIZED event must flush any pending aggregated OK events for
 * the same key so ordering in the log is preserved, and aggregation
 * restarts afterwards. The sleeps let the audit aggregation window
 * (~1s) elapse so batches are emitted deterministically.
 */
@Test public void testAggregationUnauth() throws Exception {
UserGroupInformation luser=Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getShortUserName()).thenReturn("luser");
kmsAudit.unauthorized(luser,KMSOp.GENERATE_EEK,"k2");
Thread.sleep(1000);
// Five OKs aggregate into one accessCount=5 entry…
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
// …which the UNAUTHORIZED below must flush before being logged itself.
kmsAudit.unauthorized(luser,KMSOp.GENERATE_EEK,"k3");
kmsAudit.ok(luser,KMSOp.GENERATE_EEK,"k3","testmsg");
Thread.sleep(2000);
String out=getAndResetLogOutput();
System.out.println(out);
Assert.assertTrue(out.matches("UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] " + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg"+ "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] "+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
}
BooleanVerifier
/**
 * DECRYPT_EEK events are aggregated within the audit window while
 * non-aggregated ops (DELETE_KEY, ROLL_NEW_VERSION) are logged
 * immediately; a new window after the sleep starts a fresh count.
 */
@Test public void testAggregation() throws Exception {
UserGroupInformation luser=Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getShortUserName()).thenReturn("luser");
// First DECRYPT_EEK logs immediately (accessCount=1); the rest aggregate.
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
// Non-aggregated ops log one line each, in order.
kmsAudit.ok(luser,KMSOp.DELETE_KEY,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.ROLL_NEW_VERSION,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
Thread.sleep(1500);
// After the window flushes, this starts a new accessCount=1 entry.
kmsAudit.ok(luser,KMSOp.DECRYPT_EEK,"k1","testmsg");
Thread.sleep(1500);
String out=getAndResetLogOutput();
System.out.println(out);
Assert.assertTrue(out.matches("OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg" + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Glob "test/hadoop/a??" combined with TEST_X_FILTER: only the two
 * directories whose names pass the filter (axa, axx) may come back.
 */
@Test public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; mkdirs is idempotent so the
  // duplicate is harmless, but it is likely a copy-paste slip — confirm.
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, testDirs[0])) {  // idiomatic form of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Glob "test/hadoop*" must match only the two directories at that level
 * (test/hadoop and test/hadoop2), not their children.
 */
@Test public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, testDirs[0])) {  // idiomatic form of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] paths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop"), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop2"), paths));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * create() without the overwrite flag must fail on an existing file,
 * while create(path, true, …) replaces it; the file length is checked
 * before and after to confirm the content was rewritten intact.
 */
@Test public void testOverwrite() throws IOException {
Path path=getTestRootPath(fSys,"test/hadoop/file");
fSys.mkdirs(path.getParent());
createFile(path);
Assert.assertTrue("Exists",exists(fSys,path));
Assert.assertEquals("Length",data.length,fSys.getFileStatus(path).getLen());
// Re-creating without overwrite must fail.
try {
createFile(path);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: file already exists
}
// Explicit overwrite=true succeeds and rewrites the same content.
FSDataOutputStream out=fSys.create(path,true,4096);
out.write(data,0,data.length);
out.close();
Assert.assertTrue("Exists",exists(fSys,path));
Assert.assertEquals("Length",data.length,fSys.getFileStatus(path).getLen());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * mkdirs() must refuse to create a directory underneath an existing
 * file, at both one and several levels of depth, and must not leave a
 * partially created path behind.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fSys,"test/hadoop");
Assert.assertFalse(exists(fSys,testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys,testDir));
createFile(getTestRootPath(fSys,"test/hadoop/file"));
// Directly under the file: must fail, nothing created.
Path testSubDir=getTestRootPath(fSys,"test/hadoop/file/subdir");
try {
fSys.mkdirs(testSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: parent is a file
}
Assert.assertFalse(exists(fSys,testSubDir));
// Several levels under the file: same result, no partial path.
Path testDeepSubDir=getTestRootPath(fSys,"test/hadoop/file/deep/sub/dir");
try {
fSys.mkdirs(testDeepSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: ancestor is a file
}
Assert.assertFalse(exists(fSys,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Working-directory semantics: "." is a no-op, ".." moves to the
 * parent, relative paths resolve against the current working dir, and
 * subsequent relative open/mkdirs operate under it. Statement order is
 * the test — each assertion depends on the preceding setWorkingDirectory.
 */
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// "." leaves the working directory unchanged.
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// ".." moves up one level.
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// A relative path resolves against the current working directory.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
absoluteDir=getTestRootPath(fSys,"test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
// Relative open/mkdirs must now resolve under absoluteDir.
Path absolutePath=new Path(absoluteDir,"foo");
createFile(fSys,absolutePath);
fSys.open(new Path("foo")).close();
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(isDir(fSys,new Path(absoluteDir,"newDir")));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto itself must fail: without OVERWRITE the
 * failure is FileAlreadyExistsException; with OVERWRITE it still fails
 * (exact exception type unspecified by the contract).
 */
@Test public void testRenameDirectoryToItself() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/dir");
fSys.mkdirs(src);
try {
rename(src,src,false,true,false,Rename.NONE);
Assert.fail("Renamed directory to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,src,false,true,false,Rename.OVERWRITE);
Assert.fail("Renamed directory to itself");
}
catch ( IOException e) {
// expected: self-rename fails even with OVERWRITE
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Glob "test/hadoop/a??" with the trivial DEFAULT_FILTER: all three
 * distinct matching directories (aaa, axa, axx) come back unfiltered.
 */
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; mkdirs is idempotent so the
  // duplicate is harmless, but it is likely a copy-paste slip — confirm.
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, testDirs[0])) {  // idiomatic form of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a non-empty directory onto an existing empty directory:
 * without OVERWRITE it must fail with FileAlreadyExistsException; with
 * OVERWRITE the rename succeeds (src gone, dst replaced).
 */
@Test public void testRenameDirectoryAsEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/dir");
fSys.mkdirs(src);
createFile(getTestRootPath(fSys,"test/hadoop/dir/file1"));
createFile(getTestRootPath(fSys,"test/hadoop/dir/subdir/file2"));
Path dst=getTestRootPath(fSys,"test/new/newdir");
fSys.mkdirs(dst);
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// rename(src, dst, succeeds, srcExists, dstExists, options)
rename(src,dst,true,false,true,Rename.OVERWRITE);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file into a directory that does not exist must fail with
 * FileNotFoundException, regardless of the OVERWRITE option.
 */
@Test public void testRenameFileToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/file");
createFile(src);
Path dst=getTestRootPath(fSys,"test/nonExistent/newfile");
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Non-recursive delete succeeds on an empty directory and removes it. */
@Test public void testDeleteEmptyDirectory() throws IOException {
  final Path emptyDir = getTestRootPath(fSys, "test/hadoop");
  fSys.mkdirs(emptyDir);
  Assert.assertTrue("Dir exists", exists(fSys, emptyDir));
  Assert.assertTrue("Deleted", fSys.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fSys, emptyDir));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto itself must fail: without OVERWRITE the failure
 * is FileAlreadyExistsException; with OVERWRITE it still fails (exact
 * exception type unspecified by the contract).
 */
@Test public void testRenameFileToItself() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/file");
createFile(src);
try {
rename(src,src,false,true,false,Rename.NONE);
Assert.fail("Renamed file to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,src,false,true,false,Rename.OVERWRITE);
Assert.fail("Renamed file to itself");
}
catch ( IOException e) {
// expected: self-rename fails even with OVERWRITE
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a file implicitly creates its missing parent directories;
 * the written length must match the test data.
 */
@Test public void testWriteInNonExistentDirectory() throws IOException {
  final Path filePath = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fSys, filePath.getParent()));
  createFile(filePath);
  Assert.assertTrue("Exists", exists(fSys, filePath));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(filePath).getLen());
  Assert.assertTrue("Parent exists", exists(fSys, filePath.getParent()));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a path that does not exist must fail with
 * FileNotFoundException, with and without the OVERWRITE option.
 */
@Test public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/nonExistent");
Path dst=getTestRootPath(fSys,"test/new/newpath");
try {
rename(src,dst,false,false,false,Rename.NONE);
Assert.fail("Should throw FileNotFoundException");
}
catch ( IOException e) {
Log.info("XXX",e);
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,false,false,Rename.OVERWRITE);
Assert.fail("Should throw FileNotFoundException");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Non-recursive delete on a non-empty directory must fail and leave
 * everything intact; recursive delete must remove the directory and
 * all of its contents.
 */
@Test public void testDeleteRecursively() throws IOException {
Path dir=getTestRootPath(fSys,"test/hadoop");
Path file=getTestRootPath(fSys,"test/hadoop/file");
Path subdir=getTestRootPath(fSys,"test/hadoop/subdir");
createFile(file);
fSys.mkdirs(subdir);
Assert.assertTrue("File exists",exists(fSys,file));
Assert.assertTrue("Dir exists",exists(fSys,dir));
Assert.assertTrue("Subdir exists",exists(fSys,subdir));
// Non-recursive delete of a non-empty directory must throw…
try {
fSys.delete(dir,false);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: directory is not empty
}
// …and must not have removed anything.
Assert.assertTrue("File still exists",exists(fSys,file));
Assert.assertTrue("Dir still exists",exists(fSys,dir));
Assert.assertTrue("Subdir still exists",exists(fSys,subdir));
// Recursive delete removes the tree completely.
Assert.assertTrue("Deleted",fSys.delete(dir,true));
Assert.assertFalse("File doesn't exist",exists(fSys,file));
Assert.assertFalse("Dir doesn't exist",exists(fSys,dir));
Assert.assertFalse("Subdir doesn't exist",exists(fSys,subdir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * globStatus("test/hadoop/*") with TEST_X_FILTER must match exactly the two
 * x-directories (axa and axx).
 */
@Test public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdirs is idempotent)
  if (!exists(fSys, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}
APIUtilityVerifier BooleanVerifier
/** Deleting a path that does not exist must return false rather than throw. */
@Test public void testDeleteNonExistentFile() throws IOException {
  final Path missing = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Doesn't exist", exists(fSys, missing));
  Assert.assertFalse("No deletion", fSys.delete(missing, true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * listStatus enumerates exactly the created children: one entry under "test",
 * three under "test/hadoop", none under the empty leaf "test/hadoop/a".
 */
@Test public void testListStatus() throws Exception {
Path[] testDirs={getTestRootPath(fSys,"test/hadoop/a"),getTestRootPath(fSys,"test/hadoop/b"),getTestRootPath(fSys,"test/hadoop/c/1")};
Assert.assertFalse(exists(fSys,testDirs[0]));
for ( Path path : testDirs) {
fSys.mkdirs(path);
}
FileStatus[] paths=fSys.listStatus(getTestRootPath(fSys,"test"));
Assert.assertEquals(1,paths.length);
Assert.assertEquals(getTestRootPath(fSys,"test/hadoop"),paths[0].getPath());
paths=fSys.listStatus(getTestRootPath(fSys,"test/hadoop"));
// "test/hadoop/c/1" surfaces as its parent "test/hadoop/c" at this level.
Assert.assertEquals(3,paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,"test/hadoop/a"),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,"test/hadoop/b"),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,"test/hadoop/c"),paths));
paths=fSys.listStatus(getTestRootPath(fSys,"test/hadoop/a"));
Assert.assertEquals(0,paths.length);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/** getStatus(null) must report non-negative used/remaining/capacity figures. */
@Test public void testFsStatus() throws Exception {
  final FsStatus status = fSys.getStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getUsed() >= 0);
  Assert.assertTrue(status.getRemaining() >= 0);
  Assert.assertTrue(status.getCapacity() >= 0);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto an existing non-empty directory must fail:
 * FileAlreadyExistsException without OVERWRITE, IOException with OVERWRITE.
 */
@Test public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/dir");
fSys.mkdirs(src);
createFile(getTestRootPath(fSys,"test/hadoop/dir/file1"));
createFile(getTestRootPath(fSys,"test/hadoop/dir/subdir/file2"));
Path dst=getTestRootPath(fSys,"test/new/newdir");
fSys.mkdirs(dst);
// Destination is non-empty, so even OVERWRITE cannot succeed.
createFile(getTestRootPath(fSys,"test/new/newdir/file1"));
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException ex) {
// expected: OVERWRITE cannot replace a non-empty directory
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * globStatus("test/hadoop/*") with the accept-all DEFAULT_FILTER must return
 * all three distinct directories.
 */
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdirs is idempotent)
  if (!exists(fSys, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** The single-char glob "ax?" must match exactly axa and axx (not aaa/aaa2). */
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] paths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/ax?"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * mkdirs creates the directory (and implicitly its ancestors), is idempotent
 * on repeat, and none of the created paths are files.
 */
@Test public void testMkdirs() throws Exception {
Path testDir=getTestRootPath(fSys,"test/hadoop");
Assert.assertFalse(exists(fSys,testDir));
Assert.assertFalse(isFile(fSys,testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys,testDir));
Assert.assertFalse(isFile(fSys,testDir));
// Second mkdirs on an existing directory must be a harmless no-op.
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys,testDir));
Assert.assertFalse(isFile(fSys,testDir));
Path parentDir=testDir.getParent();
Assert.assertTrue(exists(fSys,parentDir));
Assert.assertFalse(isFile(fSys,parentDir));
Path grandparentDir=parentDir.getParent();
Assert.assertTrue(exists(fSys,grandparentDir));
Assert.assertFalse(isFile(fSys,grandparentDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs={getTestRootPath(fSys,TEST_DIR_AAA),getTestRootPath(fSys,TEST_DIR_AXA),getTestRootPath(fSys,TEST_DIR_AXX),getTestRootPath(fSys,TEST_DIR_AAA2)};
if (exists(fSys,testDirs[0]) == false) {
for ( Path path : testDirs) {
fSys.mkdirs(path);
}
}
FileStatus[] paths=fSys.globStatus(getTestRootPath(fSys,"test/hadoop*/*"));
Assert.assertEquals(4,paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AAA),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AXA),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AXX),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AAA2),paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** listStatus with TEST_X_FILTER must return exactly the two x-directories. */
@Test public void testListStatusFilterWithSomeMatches() throws Exception {
  Path[] testDirs = {getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * copyToLocalFile with useRawLocalFileSystem=true must not create a CRC
 * sidecar file for the destination.
 */
@Test public void testCopyToLocalWithUseRawLocalFileSystemOption() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fSys = new RawLocalFileSystem();
  Path fileToFS = new Path(getTestRootDir(), "fs.txt");
  Path fileToLFS = new Path(getTestRootDir(), "test.txt");
  Path crcFileAtLFS = new Path(getTestRootDir(), ".test.txt.crc");
  fSys.initialize(new URI("file:///"), conf);
  writeFile(fSys, fileToFS);
  // Remove any CRC left over from a previous run (braces added to the
  // original braceless single-statement if).
  if (fSys.exists(crcFileAtLFS)) {
    Assert.assertTrue("CRC files not deleted", fSys.delete(crcFileAtLFS, true));
  }
  fSys.copyToLocalFile(false, fileToFS, fileToLFS, true); // true => raw local FS, no checksum
  Assert.assertFalse("CRC files are created", fSys.exists(crcFileAtLFS));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto an existing file fails without OVERWRITE
 * (FileAlreadyExistsException) and succeeds with OVERWRITE.
 */
@Test public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fSys,"test/hadoop/file");
createFile(src);
Path dst=getTestRootPath(fSys,"test/new/existingFile");
createFile(dst);
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// With OVERWRITE the rename must succeed: src gone, dst present.
rename(src,dst,true,false,true,Rename.OVERWRITE);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Recursive mkdir directly under an existing parent leaves a directory behind. */
@Test public void testMkdirRecursiveWithExistingDir() throws IOException {
  final Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Recursive mkdir must create missing intermediate directories as well. */
@Test public void testMkdirRecursiveWithNonExistingDir() throws IOException {
  final Path nestedDir = getTestRootPath(fc, "NonExistant2/aDir");
  fc.mkdir(nestedDir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, nestedDir));
}
APIUtilityVerifier BooleanVerifier
/** Creating a file directly under an existing directory yields a file. */
@Test public void testCreateRecursiveWithExistingDir() throws IOException {
  final Path file = getTestRootPath(fc, "foo");
  createFile(fc, file);
  Assert.assertTrue(isFile(fc, file));
}
APIUtilityVerifier BooleanVerifier
/** Creating a file under a missing parent must create the parent too. */
@Test public void testCreateRecursiveWithNonExistingDir() throws IOException {
  final Path nestedFile = getTestRootPath(fc, "NonExisting/foo");
  createFile(fc, nestedFile);
  Assert.assertTrue(isFile(fc, nestedFile));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Non-recursive mkdir works when the parent already exists. */
@Test public void testMkdirNonRecursiveWithExistingDir() throws IOException {
  final Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier BooleanVerifier
/** File creation under an existing directory yields a regular file. */
@Test public void testCreateNonRecursiveWithExistingDir() throws IOException {
  final Path file = getTestRootPath(fc, "foo");
  createFile(fc, file);
  Assert.assertTrue(isFile(fc, file));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** globStatus("test/hadoop*") must match the two parent dirs hadoop and hadoop2. */
@Test public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] paths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop2"), paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * fc.mkdir with createParent=true creates the directory and ancestors, is
 * idempotent on repeat, and none of the created paths are files.
 */
@Test public void testMkdirs() throws Exception {
Path testDir=getTestRootPath(fc,"test/hadoop");
Assert.assertFalse(exists(fc,testDir));
Assert.assertFalse(isFile(fc,testDir));
fc.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc,testDir));
Assert.assertFalse(isFile(fc,testDir));
// Second mkdir on an existing directory must be a harmless no-op.
fc.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc,testDir));
Assert.assertFalse(isFile(fc,testDir));
Path parentDir=testDir.getParent();
Assert.assertTrue(exists(fc,parentDir));
Assert.assertFalse(isFile(fc,parentDir));
Path grandparentDir=parentDir.getParent();
Assert.assertTrue(exists(fc,grandparentDir));
Assert.assertFalse(isFile(fc,grandparentDir));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Renaming a non-existent source must fail with FileNotFoundException, both with and without OVERWRITE. */
@Test public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/nonExistent");
Path dst=getTestRootPath(fc,"test/new/newpath");
try {
rename(src,dst,false,false,false,Rename.NONE);
Assert.fail("Should throw FileNotFoundException");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,false,false,Rename.OVERWRITE);
Assert.fail("Should throw FileNotFoundException");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * globStatus("test/hadoop/*") with TEST_X_FILTER must match exactly the two
 * x-directories (axa and axx).
 */
@Test public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdir is idempotent)
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop*/*"));
Assert.assertEquals(4,paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA2),paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** deleteOnExit on a path that does not exist must decline to register it. */
@Test public void testDeleteOnExitUnexisting() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  boolean registered = fc.deleteOnExit(path);
  // assertFalse is clearer and reports better than assertTrue(!registered).
  Assert.assertFalse(registered);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** glob "test/hadoop/a??" with TEST_X_FILTER must match exactly axa and axx. */
@Test public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdir is idempotent)
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Renaming a file into a directory that does not exist must fail with FileNotFoundException in both modes. */
@Test public void testRenameFileToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/file");
createFile(src);
Path dst=getTestRootPath(fc,"test/nonExistent/newfile");
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Non-recursive delete of a non-empty directory must fail and leave the whole
 * tree intact; recursive delete must remove the directory and its contents.
 */
@Test public void testDeleteRecursively() throws IOException {
Path dir=getTestRootPath(fc,"test/hadoop");
Path file=getTestRootPath(fc,"test/hadoop/file");
Path subdir=getTestRootPath(fc,"test/hadoop/subdir");
createFile(file);
fc.mkdir(subdir,FsPermission.getDefault(),true);
Assert.assertTrue("File exists",exists(fc,file));
Assert.assertTrue("Dir exists",exists(fc,dir));
Assert.assertTrue("Subdir exists",exists(fc,subdir));
try {
fc.delete(dir,false);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: non-recursive delete of a non-empty directory
}
Assert.assertTrue("File still exists",exists(fc,file));
Assert.assertTrue("Dir still exists",exists(fc,dir));
Assert.assertTrue("Subdir still exists",exists(fc,subdir));
Assert.assertTrue("Deleted",fc.delete(dir,true));
Assert.assertFalse("File doesn't exist",exists(fc,file));
Assert.assertFalse("Dir doesn't exist",exists(fc,dir));
Assert.assertFalse("Subdir doesn't exist",exists(fc,subdir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * globStatus("test/hadoop/*") with the accept-all DEFAULT_FILTER must return
 * all three distinct directories.
 */
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdir is idempotent)
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto an existing non-empty directory must fail:
 * FileAlreadyExistsException without OVERWRITE, IOException with OVERWRITE.
 */
@Test public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/dir");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
createFile(getTestRootPath(fc,"test/hadoop/dir/file1"));
createFile(getTestRootPath(fc,"test/hadoop/dir/subdir/file2"));
Path dst=getTestRootPath(fc,"test/new/newdir");
fc.mkdir(dst,FileContext.DEFAULT_PERM,true);
// Destination is non-empty, so even OVERWRITE cannot succeed.
createFile(getTestRootPath(fc,"test/new/newdir/file1"));
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException ex) {
// expected: OVERWRITE cannot replace a non-empty directory
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto an existing empty directory fails without
 * OVERWRITE (FileAlreadyExistsException) and succeeds with OVERWRITE.
 */
@Test public void testRenameDirectoryAsEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/dir");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
createFile(getTestRootPath(fc,"test/hadoop/dir/file1"));
createFile(getTestRootPath(fc,"test/hadoop/dir/subdir/file2"));
Path dst=getTestRootPath(fc,"test/new/newdir");
// dst stays empty, so OVERWRITE can replace it below.
fc.mkdir(dst,FileContext.DEFAULT_PERM,true);
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
rename(src,dst,true,false,true,Rename.OVERWRITE);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Creating a file under a missing parent must create the parent and write the full payload. */
@Test public void testWriteInNonExistentDirectory() throws IOException {
Path path=getTestRootPath(fc,"test/hadoop/file");
Assert.assertFalse("Parent doesn't exist",exists(fc,path.getParent()));
createFile(path);
Assert.assertTrue("Exists",exists(fc,path));
Assert.assertEquals("Length",data.length,fc.getFileStatus(path).getLen());
Assert.assertTrue("Parent exists",exists(fc,path.getParent()));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Renaming a directory to itself must fail with FileAlreadyExistsException in both modes. */
@Test public void testRenameDirectoryToItself() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/dir");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
try {
rename(src,src,false,true,false,Rename.NONE);
Assert.fail("Renamed directory to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,src,false,true,false,Rename.OVERWRITE);
Assert.fail("Renamed directory to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** glob "test/hadoop/a??" with the accept-all DEFAULT_FILTER returns all three distinct dirs. */
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)}; // NOTE(review): AXX listed twice -- harmless (mkdir is idempotent)
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}
APIUtilityVerifier BooleanVerifier
/** Deleting a path that does not exist must return false rather than throw. */
@Test public void testDeleteNonExistentFile() throws IOException {
  final Path missing = getTestRootPath(fc, "test/hadoop/file");
  Assert.assertFalse("Doesn't exist", exists(fc, missing));
  Assert.assertFalse("No deletion", fc.delete(missing, true));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Renaming a directory under a missing parent must fail with FileNotFoundException in both modes. */
@Test public void testRenameDirectoryToNonExistentParent() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/dir");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
Path dst=getTestRootPath(fc,"test/nonExistent/newdir");
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Writing with verify-checksum enabled must round-trip the data intact. */
@Test public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
  try {
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  // assertEquals reports both values on failure, unlike assertTrue(len == ...).
  assertEquals(data.length, len);
  byte[] bb = new byte[(int) len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    // NOTE(review): a single read() may return fewer bytes than requested for
    // large payloads -- TODO consider readFully.
    fsdis.read(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Non-recursive delete must succeed on an empty directory. */
@Test public void testDeleteEmptyDirectory() throws IOException {
  final Path emptyDir = getTestRootPath(fc, "test/hadoop");
  fc.mkdir(emptyDir, FsPermission.getDefault(), true);
  Assert.assertTrue("Dir exists", exists(fc, emptyDir));
  Assert.assertTrue("Deleted", fc.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fc, emptyDir));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Renaming a file to itself must fail with FileAlreadyExistsException in both modes. */
@Test public void testRenameFileToItself() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/file");
createFile(src);
try {
rename(src,src,false,true,false,Rename.NONE);
Assert.fail("Renamed file to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
rename(src,src,false,true,false,Rename.OVERWRITE);
Assert.fail("Renamed file to itself");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** The single-char glob "ax?" must match exactly axa and axx (not aaa/aaa2). */
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, testDirs[0])) { // idiomatic negation instead of "== false"
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] paths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/ax?"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), paths));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises working-directory semantics: "." keeps the cwd, ".." moves to the
 * parent, relative paths resolve against the cwd, cd to a non-existent path
 * or a regular file must fail.
 */
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// "." must leave the working directory unchanged.
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// ".." must move to the parent.
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fc.getWorkingDirectory());
workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// A relative cd resolves against the current working directory.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
absoluteDir=getTestRootPath(fc,"test/existingDir2");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
// Relative open/mkdir must resolve against the new working directory.
Path absolutePath=new Path(absoluteDir,"foo");
fc.create(absolutePath,EnumSet.of(CREATE)).close();
fc.open(new Path("foo")).close();
fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
absoluteDir=getTestRootPath(fc,"nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
}
catch ( Exception e) {
// expected: cannot cd into a non-existent directory
}
absoluteDir=new Path(localFsRootPath,"existingDir");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
Path aRegularFile=new Path("aRegularFile");
createFile(aRegularFile);
try {
fc.setWorkingDirectory(aRegularFile);
fail("An IOException expected.");
}
catch ( IOException ioe) {
// expected: a regular file cannot be the working directory
}
}
TestCleaner InternalCallVerifier BooleanVerifier HybridVerifier
/** Remove the per-test "test" root (must succeed) and the local-FS scratch root. */
@After public void tearDown() throws Exception {
  final Path testRoot = new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test"));
  assertTrue(fc.delete(testRoot, true));
  fc.delete(localFsRootPath, true);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * mkdir must fail (and create nothing) when any path component is an
 * existing regular file, both for direct and deep subdirectories.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fc,"test/hadoop");
Assert.assertFalse(exists(fc,testDir));
fc.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc,testDir));
createFile(getTestRootPath(fc,"test/hadoop/file"));
Path testSubDir=getTestRootPath(fc,"test/hadoop/file/subdir");
try {
fc.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: "file" is a regular file, not a directory
}
Assert.assertFalse(exists(fc,testSubDir));
Path testDeepSubDir=getTestRootPath(fc,"test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: same failure for a deeper path under the file
}
Assert.assertFalse(exists(fc,testDeepSubDir));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/** getFsStatus(null) must report non-negative used/remaining/capacity figures. */
@Test public void testFsStatus() throws Exception {
  final FsStatus status = fc.getFsStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getUsed() >= 0);
  Assert.assertTrue(status.getRemaining() >= 0);
  Assert.assertTrue(status.getCapacity() >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * listStatus (both the Util array form and the iterator form) must enumerate
 * exactly the created children of "test" and "test/hadoop".
 */
@Test public void testListStatus() throws Exception {
  Path[] testDirs = {getTestRootPath(fc, "test/hadoop/a"), getTestRootPath(fc, "test/hadoop/b"),
      getTestRootPath(fc, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fc, testDirs[0]));
  for (Path path : testDirs) {
    fc.mkdir(path, FsPermission.getDefault(), true);
  }
  // Array-returning form.
  FileStatus[] paths = fc.util().listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(1, paths.length);
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), paths[0].getPath());
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop"));
  Assert.assertEquals(3, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), paths));
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertEquals(0, paths.length);
  // Iterator-returning form; typed to replace the original's raw
  // RemoteIterator (raw next() yields Object, which has no getPath()).
  RemoteIterator<FileStatus> pathsIterator = fc.listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), pathsIterator.next().getPath());
  Assert.assertFalse(pathsIterator.hasNext());
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop"));
  FileStatus[] subdirs = new FileStatus[3];
  int i = 0;
  while (i < 3 && pathsIterator.hasNext()) {
    subdirs[i++] = pathsIterator.next();
  }
  Assert.assertFalse(pathsIterator.hasNext());
  // assertEquals reports both values on failure, unlike assertTrue(i == 3).
  Assert.assertEquals(3, i);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), subdirs));
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertFalse(pathsIterator.hasNext());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * listCorruptFileBlocks: when supported, the iterator must be non-null and
 * throw NoSuchElementException once exhausted; when unsupported, the call
 * must throw UnsupportedOperationException.
 */
@Test public void testListCorruptFileBlocks() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  createFile(path);
  try {
    // Typed iterator replaces the original's raw RemoteIterator (raw next()
    // yields Object, which is not assignable to Path).
    final RemoteIterator<Path> remoteIterator = fc.listCorruptFileBlocks(path);
    if (listCorruptedBlocksSupported()) {
      assertNotNull(remoteIterator);
      Path p;
      while (remoteIterator.hasNext()) {
        p = remoteIterator.next();
        System.out.println("corrupted block: " + p);
      }
      // An exhausted iterator must refuse a further next().
      try {
        remoteIterator.next();
        fail();
      } catch (NoSuchElementException nsee) {
        // expected
      }
    } else {
      fail();
    }
  } catch (UnsupportedOperationException uoe) {
    if (listCorruptedBlocksSupported()) {
      fail(uoe.toString());
    }
    // else: expected on file systems without corrupt-block listing
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto an existing file fails without OVERWRITE
 * (FileAlreadyExistsException) and succeeds with OVERWRITE.
 */
@Test public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/file");
createFile(src);
Path dst=getTestRootPath(fc,"test/new/existingFile");
createFile(dst);
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// With OVERWRITE the rename must succeed: src gone, dst present.
rename(src,dst,true,false,true,Rename.OVERWRITE);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A directory made through fc1 must be visible and deletable through fc2,
 * including names with spaces, wildcards, and special characters (skipping
 * names the current platform cannot represent).
 */
@Test public void testDeleteDirectory() throws IOException {
String dirName="dirTest";
Path testDirPath=qualifiedPath(dirName,fc2);
Assert.assertFalse(exists(fc2,testDirPath));
fc1.mkdir(testDirPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDirPath));
Assert.assertTrue(isDir(fc2,testDirPath));
fc2.delete(testDirPath,true);
Assert.assertFalse(isDir(fc2,testDirPath));
String dirNames[]={"deleteTest/testDir","deleteTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/test_DIr","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
// Skip names that are illegal on this platform (e.g. '*' on Windows).
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,true));
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(isDir(fc2,testPath));
}
}
APIUtilityVerifier BooleanVerifier
/** isDir must be true for an existing directory and the root, false for a missing path. */
@Test public void testIsDirectory() throws IOException {
  final Path existingDir = qualifiedPath("dirTest", fc2);
  final Path missingDir = qualifiedPath("nonExistantDir", fc2);
  final Path rootDir = qualifiedPath("/", fc2);
  fc1.mkdir(existingDir, FsPermission.getDefault(), true);
  Assert.assertTrue(isDir(fc2, existingDir));
  Assert.assertTrue(isDir(fc2, rootDir));
  Assert.assertFalse(isDir(fc2, missingDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Creates a file through fc1 and deletes it (non-recursively) through fc2,
 * checking existence before and after each step.
 */
@Test public void testDeleteFile() throws IOException {
Path target=qualifiedPath("testFile",fc2);
// Absent before creation.
Assert.assertFalse(exists(fc2,target));
// Created via fc1, visible via fc2.
createFile(fc1,target);
Assert.assertTrue(exists(fc2,target));
// Non-recursive delete through fc2 removes it.
fc2.delete(target,false);
Assert.assertFalse(exists(fc2,target));
}
APIUtilityVerifier BooleanVerifier
/**
 * Creates files through fc1 and verifies their existence through fc2 for a
 * set of file names containing spaces and special characters, skipping
 * names that are not legal on the current platform.
 */
@Test public void testCreateFile() throws IOException {
String fileNames[]={"testFile","test File","test*File","test#File","test1234","1234Test","test)File","test_File","()&^%$#@!~_+}{>"," ","^ "};
for ( String f : fileNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
// Each name must be absent before creation and visible afterwards.
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
Assert.assertTrue(exists(fc2,testPath));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * delete() on a missing directory must return false; after the directory
 * is created, delete() returns true once and false on the second attempt.
 */
@Test public void testDeleteNonExistingDirectory() throws IOException {
String testDirName="testFile";
Path testPath=qualifiedPath(testDirName,fc2);
Assert.assertFalse(exists(fc2,testPath));
// Deleting a path that does not exist reports false, not an exception.
Assert.assertFalse(fc2.delete(testPath,false));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,false));
Assert.assertFalse(exists(fc2,testPath));
// Second delete of the now-removed directory again reports false.
Assert.assertFalse(fc2.delete(testPath,false));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Passing a null file name must surface as IllegalArgumentException
 * (thrown by qualifiedPath/Path construction or by create itself).
 */
@Test public void testCreateFileWithNullName() throws IOException {
String fileName=null;
try {
Path testPath=qualifiedPath(fileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
Assert.fail("Create file with null name should throw IllegalArgumentException.");
}
catch ( IllegalArgumentException e) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * delete() on a missing file must return false; after the file is created
 * it returns true once, then false on a repeated delete.
 */
@Test public void testDeleteNonExistingFile() throws IOException {
String testFileName="testFile";
Path testPath=qualifiedPath(testFileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
// Missing file: delete reports false rather than throwing.
Assert.assertFalse(fc2.delete(testPath,false));
createFile(fc1,testPath);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,false));
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(fc2.delete(testPath,false));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Despite the name, this checks FsStatus (filesystem capacity/usage), not
 * FileStatus: after creating a file, capacity, remaining and used space
 * reported via fc2 must all be positive.
 */
@Test public void testFileStatus() throws IOException {
String fileName="file1";
Path path2=fc2.makeQualified(new Path(BASE,fileName));
createFile(fc1,path2);
FsStatus fc2Status=fc2.getFsStatus(path2);
Assert.assertNotNull(fc2Status);
// NOTE(review): assumes the backing store already reports nonzero usage
// once a file exists -- TODO confirm for sparse/empty filesystems.
Assert.assertTrue(fc2Status.getCapacity() > 0);
Assert.assertTrue(fc2Status.getRemaining() > 0);
Assert.assertTrue(fc2Status.getUsed() > 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Same contract as testDeleteNonExistingFile, but for a file nested two
 * directories deep: delete returns false when absent, true exactly once
 * after creation.
 */
@Test public void testDeleteNonExistingFileInDir() throws IOException {
String testFileInDir="testDir/testDir/TestFile";
Path testPath=qualifiedPath(testFileInDir,fc2);
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(fc2.delete(testPath,false));
// createFile is expected to create the missing parent directories too.
createFile(fc1,testPath);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,false));
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(fc2.delete(testPath,false));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a file whose parent directory does not exist must implicitly
 * create the parent; both the parent and the file must then be visible
 * through the second context.
 */
@Test public void testCreateFileInNonExistingDirectory() throws IOException {
String fileName="testDir/testFile";
Path testPath=qualifiedPath(fileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
// Parent was auto-created and carries the expected name.
Assert.assertTrue(isDir(fc2,testPath.getParent()));
Assert.assertEquals("testDir",testPath.getParent().getName());
Assert.assertTrue(exists(fc2,testPath));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Re-creating an existing file through the second context must fail with
 * an IOException, and the original file must survive the failed attempt.
 */
@Test public void testCreateExistingFile() throws IOException {
String fileName="testFile";
Path testPath=qualifiedPath(fileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
try {
createFile(fc2,testPath);
Assert.fail("Create existing file should throw an IOException.");
}
catch ( IOException e) {
// expected
}
// The first copy is still there.
Assert.assertTrue(exists(fc2,testPath));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises both listStatus flavors on a tree created under test/hadoop:
 * the array-returning util().listStatus and the RemoteIterator-returning
 * fc1.listStatus, including directory names with special characters
 * (filtered per platform). Both flavors must agree on child counts and
 * paths, and listing an empty leaf yields no entries.
 */
@Test public void testListStatus() throws Exception {
final String hPrefix="test/hadoop";
final String[] dirs={hPrefix + "/a",hPrefix + "/b",hPrefix + "/c",hPrefix + "/1",hPrefix + "/#@#@",hPrefix + "/&*#$#$@234"};
// Was a raw ArrayList: get() then returned Object, so the calls to
// exists(fc1,...) and the typed for-each below did not type-check.
ArrayList<Path> testDirs=new ArrayList<Path>();
for ( String d : dirs) {
if (!isTestableFileNameOnPlatform(d)) {
continue;
}
testDirs.add(qualifiedPath(d,fc2));
}
Assert.assertFalse(exists(fc1,testDirs.get(0)));
for ( Path path : testDirs) {
fc1.mkdir(path,FsPermission.getDefault(),true);
}
// Array variant: the "test" parent has exactly one child, hPrefix.
FileStatus[] paths=fc1.util().listStatus(qualifiedPath("test",fc1));
Assert.assertEquals(1,paths.length);
Assert.assertEquals(qualifiedPath(hPrefix,fc1),paths[0].getPath());
paths=fc1.util().listStatus(qualifiedPath(hPrefix,fc1));
Assert.assertEquals(testDirs.size(),paths.length);
// Every created directory must appear in the listing (order-free check).
for (int i=0; i < testDirs.size(); i++) {
boolean found=false;
for (int j=0; j < paths.length; j++) {
if (qualifiedPath(testDirs.get(i).toString(),fc1).equals(paths[j].getPath())) {
found=true;
}
}
Assert.assertTrue(testDirs.get(i) + " not found",found);
}
paths=fc1.util().listStatus(qualifiedPath(dirs[0],fc1));
Assert.assertEquals(0,paths.length);
// Iterator variant: was a raw RemoteIterator, so next().getPath() did not
// compile; typed it to RemoteIterator<FileStatus>.
RemoteIterator<FileStatus> pathsItor=fc1.listStatus(qualifiedPath("test",fc1));
Assert.assertEquals(qualifiedPath(hPrefix,fc1),pathsItor.next().getPath());
Assert.assertFalse(pathsItor.hasNext());
pathsItor=fc1.listStatus(qualifiedPath(hPrefix,fc1));
int dirLen=0;
for (; pathsItor.hasNext(); dirLen++) {
boolean found=false;
FileStatus stat=pathsItor.next();
for (int j=0; j < dirs.length; j++) {
if (qualifiedPath(dirs[j],fc1).equals(stat.getPath())) {
found=true;
break;
}
}
Assert.assertTrue(stat.getPath() + " not found",found);
}
Assert.assertEquals(testDirs.size(),dirLen);
// An empty leaf directory yields an empty iterator.
pathsItor=fc1.listStatus(qualifiedPath(dirs[0],fc1));
Assert.assertFalse(pathsItor.hasNext());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * mkdir must fail with IOException when any ancestor of the requested
 * directory is an existing file, both for an immediate subdirectory and
 * for a deeply nested one; neither directory may be created as a side
 * effect of the failed attempts.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=qualifiedPath("test/hadoop",fc2);
Assert.assertFalse(exists(fc2,testDir));
fc2.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDir));
// A file now blocks any directory creation beneath it.
createFile(fc1,qualifiedPath("test/hadoop/file",fc2));
Path testSubDir=qualifiedPath("test/hadoop/file/subdir",fc2);
try {
fc1.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: parent is a file
}
Assert.assertFalse(exists(fc1,testSubDir));
Path testDeepSubDir=qualifiedPath("test/hadoop/file/deep/sub/dir",fc1);
try {
fc2.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fc1,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Directory creation semantics: a created directory (and its implicitly
 * created ancestors) is a dir and not a file; mkdir of an existing
 * directory is idempotent; a never-created path reports neither exists nor
 * isDir. Also repeats creation for names with special characters.
 */
@Test public void testCreateDirectory() throws IOException {
Path path=qualifiedPath("test/hadoop",fc2);
Path falsePath=qualifiedPath("path/doesnot.exist",fc2);
Path subDirPath=qualifiedPath("dir0",fc2);
Assert.assertFalse(exists(fc1,path));
Assert.assertFalse(isFile(fc1,path));
Assert.assertFalse(isDir(fc1,path));
fc1.mkdir(path,FsPermission.getDefault(),true);
Assert.assertTrue(isDir(fc2,path));
Assert.assertTrue(exists(fc2,path));
Assert.assertFalse(isFile(fc2,path));
// Repeated mkdir of the same directory must not fail (idempotence).
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
// Ancestors of "test/hadoop" were created implicitly.
Path parentDir=path.getParent();
Assert.assertTrue(exists(fc2,parentDir));
Assert.assertFalse(isFile(fc2,parentDir));
Path grandparentDir=parentDir.getParent();
Assert.assertTrue(exists(fc2,grandparentDir));
Assert.assertFalse(isFile(fc2,grandparentDir));
Assert.assertFalse(exists(fc2,falsePath));
Assert.assertFalse(isDir(fc2,falsePath));
// Unusual directory names; filtered per platform.
String dirNames[]={"createTest/testDir","createTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/test_DIr","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
}
}
InternalCallVerifier BooleanVerifier
/**
 * util().copy of a directory must copy it recursively: a file written under
 * dir1 shows up under dir2 with identical content after the copy.
 */
@Test public void testRecursiveFcCopy() throws Exception {
final String ts="some random text";
Path dir1=fileContextTestHelper.getTestRootPath(fc,"dir1");
Path dir2=fileContextTestHelper.getTestRootPath(fc,"dir2");
Path file1=new Path(dir1,"file1");
fc.mkdir(dir1,null,false);
writeFile(fc,file1,ts.getBytes());
assertTrue(fc.util().exists(file1));
// Expected location of the copied file inside the destination dir.
Path file2=new Path(dir2,"file1");
fc.util().copy(dir1,dir2);
assertTrue("Failed to copy file2 ",fc.util().exists(file2));
// Byte-for-byte content comparison of source and copy.
assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),readFile(fc,file2,ts.getBytes().length)));
}
InternalCallVerifier BooleanVerifier
/**
 * util().copy of a single file: the destination must exist after the copy
 * and hold bytes identical to the source.
 */
@Test public void testFcCopy() throws Exception {
final String ts="some random text";
Path file1=fileContextTestHelper.getTestRootPath(fc,"file1");
Path file2=fileContextTestHelper.getTestRootPath(fc,"file2");
writeFile(fc,file1,ts.getBytes());
assertTrue(fc.util().exists(file1));
fc.util().copy(file1,file2);
assertTrue("Failed to copy file2 ",fc.util().exists(file2));
// Byte-for-byte content comparison of source and copy.
assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),readFile(fc,file2,ts.getBytes().length)));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto a symlink-to-file must fail even with
 * OVERWRITE, leaving both the directory and the link intact.
 */
@Test(timeout=10000) public void testRenameDirToSymlinkToFile() throws IOException {
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir2(),"file");
Path linkToFile=new Path(testBaseDir2(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,linkToFile,false);
try {
wrapper.rename(dir1,linkToFile,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// Only the broad IOException contract is asserted here.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToFile));
}
InternalCallVerifier BooleanVerifier
/**
 * A symlink to a directory: followed status (getFileStatus/isDir) reports
 * a directory, while link status (getFileLinkStatus) reports a symlink.
 */
@Test(timeout=10000) public void testCreateLinkToDirectory() throws IOException {
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
createAndWriteFile(file);
wrapper.createSymlink(dir1,linkToDir,false);
assertFalse(wrapper.isFile(linkToDir));
assertTrue(wrapper.isDir(linkToDir));
// Followed stat sees the target directory; lstat sees the link itself.
assertTrue(wrapper.getFileStatus(linkToDir).isDirectory());
assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a symlink onto the directory it points to must fail with
 * FileAlreadyExistsException, with and without OVERWRITE, and must leave
 * the directory, the link, and the link target unchanged. Skipped for the
 * local "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToDirItLinksTo() throws IOException {
if ("file".equals(getScheme())) {
return;
}
Path dir=new Path(testBaseDir1(),"dir");
Path link=new Path(testBaseDir1(),"linkToDir");
wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(dir,link,false);
try {
wrapper.rename(link,dir);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Nothing changed after the failed rename.
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir,wrapper.getLinkTarget(link));
try {
wrapper.rename(link,dir,Rename.OVERWRITE);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Still unchanged even when OVERWRITE was requested.
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir,wrapper.getLinkTarget(link));
}
UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Renaming a symlink moves the link itself, not its target: the new name
 * is still a symlink, both it and the target stay readable, and creating a
 * file at the new name fails because the link occupies it. Skipped when
 * symlinks are emulated on Windows.
 */
@Test(timeout=10000) public void testRenameSymlink() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file=new Path(testBaseDir1(),"file");
Path link1=new Path(testBaseDir1(),"linkToFile1");
Path link2=new Path(testBaseDir1(),"linkToFile2");
createAndWriteFile(file);
wrapper.createSymlink(file,link1,false);
wrapper.rename(link1,link2);
assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
assertFalse(wrapper.getFileStatus(link2).isDirectory());
// Both the moved link and the untouched target resolve and read fine.
readFile(link2);
readFile(file);
try {
createAndWriteFile(link2);
fail("link was not renamed");
}
catch ( IOException x) {
// expected: link2 already exists
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto a dangling symlink (target does not exist)
 * must fail even with OVERWRITE; the directory and the link both survive.
 */
@Test(timeout=10000) public void testRenameDirToDanglingSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path link=new Path(testBaseDir2(),"linkToFile");
wrapper.createSymlink(new Path("/doesNotExist"),link,false);
try {
wrapper.rename(dir,link,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// Only the broad IOException contract is asserted here.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir));
// The dangling link itself is still stat-able via lstat.
assertTrue(wrapper.getFileLinkStatus(link) != null);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * createSymlink with createParent=false must fail when the link's parent
 * directory is missing (and must not create it); with createParent=true
 * the parent is created and the link resolves.
 */
@Test(timeout=10000) public void testCreateLinkCanCreateParent() throws IOException {
Path file=new Path(testBaseDir1() + "/file");
Path link=new Path(testBaseDir2() + "/linkToFile");
createAndWriteFile(file);
// Remove the would-be parent so the first attempt has nowhere to go.
wrapper.delete(new Path(testBaseDir2()),true);
try {
wrapper.createSymlink(file,link,false);
fail("Created link without first creating parent dir");
}
catch ( IOException x) {
// expected: parent missing and createParent=false
}
assertFalse(wrapper.exists(new Path(testBaseDir2())));
wrapper.createSymlink(file,link,true);
readFile(link);
}
InternalCallVerifier BooleanVerifier
/**
 * Renaming a symlink to a fresh (non-existent) destination succeeds: the
 * new name is a symlink (or a plain copy under Windows emulation), both it
 * and the target remain readable, and the old name is gone.
 */
@Test(timeout=10000) public void testRenameSymlinkNonExistantDest() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path link1=new Path(testBaseDir1(),"linkToFile1");
Path link2=new Path(testBaseDir1(),"linkToFile2");
createAndWriteFile(file);
wrapper.createSymlink(file,link1,false);
wrapper.rename(link1,link2);
assertTrue(wrapper.getFileLinkStatus(link2).isSymlink() || emulatingSymlinksOnWindows());
readFile(link2);
readFile(file);
assertFalse(wrapper.exists(link1));
}
InternalCallVerifier BooleanVerifier
/**
 * Renaming a file onto a dangling symlink: without OVERWRITE the rename
 * may fail; with OVERWRITE it succeeds and the link is replaced by a plain
 * file. Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testRenameFileToDanglingSymlink() throws IOException {
if ("file".equals(getScheme())) {
return;
}
Path file1=new Path(testBaseDir1(),"file1");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
wrapper.createSymlink(new Path("/doesNotExist"),link,false);
try {
wrapper.rename(file1,link);
}
catch ( IOException e) {
// NOTE(review): no fail() before this catch -- the non-OVERWRITE rename
// is apparently allowed to either succeed or throw; confirm intent.
}
wrapper.rename(file1,link,Rename.OVERWRITE);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a symlink onto the file it points to must fail with
 * FileAlreadyExistsException, with and without OVERWRITE, leaving the
 * file, the link, and the link target unchanged. Skipped for the local
 * "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToFileItLinksTo() throws IOException {
if ("file".equals(getScheme())) {
return;
}
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,link,false);
try {
wrapper.rename(link,file);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Nothing changed after the failed rename.
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file,wrapper.getLinkTarget(link));
try {
wrapper.rename(link,file,Rename.OVERWRITE);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Still unchanged even when OVERWRITE was requested.
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file,wrapper.getLinkTarget(link));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto a symlink-to-file: without OVERWRITE it fails with
 * FileAlreadyExistsException; with OVERWRITE the link is replaced by the
 * renamed file (no longer a symlink).
 */
@Test(timeout=10000) public void testRenameFileToSymlinkToFile() throws IOException {
Path file1=new Path(testBaseDir1(),"file1");
Path file2=new Path(testBaseDir1(),"file2");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2,link,false);
try {
wrapper.rename(file1,link);
fail("Renamed file to symlink w/o overwrite");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file1,link,Rename.OVERWRITE);
// The link name now holds the plain file that was file1.
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
InternalCallVerifier BooleanVerifier
/**
 * Renames a symlink addressed through another symlink (linkToDir points at
 * the base dir): the link moves to its new name, the underlying file is
 * untouched, and the moved link still resolves.
 */
@Test(timeout=10000) public void testRenameSymlinkViaSymlink() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"link");
// Paths that reach "link" through the directory symlink.
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path linkViaLink=new Path(linkToDir,"link");
Path linkNewViaLink=new Path(linkToDir,"linkNew");
createAndWriteFile(file);
wrapper.createSymlink(file,link,false);
wrapper.createSymlink(baseDir,linkToDir,false);
wrapper.rename(linkViaLink,linkNewViaLink);
assertFalse(wrapper.exists(linkViaLink));
assertTrue(wrapper.exists(file));
assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink() || emulatingSymlinksOnWindows());
readFile(linkNewViaLink);
}
InternalCallVerifier BooleanVerifier
/**
 * Listing a directory through a symlink to it must resolve the link: both
 * the array and iterator variants return the directory's entries. The
 * count is allowed to be 2 or 3 (the exact set of extra entries depends on
 * the scheme under test, mirroring the array-variant tolerance).
 */
@Test(timeout=10000) public void testListStatusUsingLink() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"link");
createAndWriteFile(file);
wrapper.createSymlink(new Path(testBaseDir1()),link,false);
FileStatus[] stats=wrapper.listStatus(link);
assertTrue(stats.length == 2 || stats.length == 3);
// Was a raw RemoteIterator; typed so no unchecked warning is produced
// and next() is known to yield FileStatus.
RemoteIterator<FileStatus> statsItor=wrapper.listStatusIterator(link);
int dirLen=0;
while (statsItor.hasNext()) {
statsItor.next();
dirLen++;
}
// Iterator must agree with the array variant's entry count.
assertTrue(dirLen == 2 || dirLen == 3);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a symlink onto itself must fail with
 * FileAlreadyExistsException, both with and without OVERWRITE.
 */
@Test(timeout=10000) public void testRenameSymlinkToItself() throws IOException {
Path file=new Path(testBaseDir1(),"file");
createAndWriteFile(file);
Path link=new Path(testBaseDir1(),"linkToFile1");
wrapper.createSymlink(file,link,false);
try {
wrapper.rename(link,link);
fail("Failed to get expected IOException");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Same contract even when OVERWRITE is requested.
try {
wrapper.rename(link,link,Rename.OVERWRITE);
fail("Failed to get expected IOException");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
BranchVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Stat of a symlink to a file: lstat sees a symlink (not a directory),
 * followed stat sees the file, and getLinkTarget returns the original
 * target. On non-local schemes the followed status of link and file are
 * identical while their reported paths differ. Skipped under Windows
 * symlink emulation.
 */
@Test(timeout=10000) public void testStatLinkToFile() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file=new Path(testBaseDir1() + "/file");
Path linkToFile=new Path(testBaseDir1() + "/linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,linkToFile,false);
assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory());
assertTrue(wrapper.isSymlink(linkToFile));
assertTrue(wrapper.isFile(linkToFile));
assertFalse(wrapper.isDir(linkToFile));
assertEquals(file,wrapper.getLinkTarget(linkToFile));
if (!"file".equals(getScheme())) {
// Followed stat is the same object state; only the path field differs.
assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile));
assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath());
assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath());
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates and deletes a directory addressed through a symlink to its
 * parent: mkdir via the link works, both listStatus variants see it empty,
 * and deleting via the link removes the real directory too.
 */
@Test(timeout=10000) public void testAccessDirViaSymlink() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path dir=new Path(testBaseDir1(),"dir");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path dirViaLink=new Path(linkToDir,"dir");
wrapper.createSymlink(baseDir,linkToDir,false);
wrapper.mkdir(dirViaLink,FileContext.DEFAULT_PERM,true);
assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory());
FileStatus[] stats=wrapper.listStatus(dirViaLink);
assertEquals(0,stats.length);
// Was a raw RemoteIterator; typed to RemoteIterator<FileStatus> to match
// listStatusIterator's declared return and avoid raw-type use.
RemoteIterator<FileStatus> statsItor=wrapper.listStatusIterator(dirViaLink);
assertFalse(statsItor.hasNext());
wrapper.delete(dirViaLink,false);
// Deleting through the link removed the actual directory as well.
assertFalse(wrapper.exists(dirViaLink));
assertFalse(wrapper.exists(dir));
}
InternalCallVerifier BooleanVerifier
/**
 * Renames a directory addressed through a symlink to its parent: the old
 * name (both via link and directly) disappears and the new name exists.
 */
@Test(timeout=10000) public void testRenameDirViaSymlink() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path dir=new Path(baseDir,"dir");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path dirViaLink=new Path(linkToDir,"dir");
Path dirNewViaLink=new Path(linkToDir,"dirNew");
wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(baseDir,linkToDir,false);
assertTrue(wrapper.exists(dirViaLink));
wrapper.rename(dirViaLink,dirNewViaLink);
// Rename through the link moved the real directory.
assertFalse(wrapper.exists(dirViaLink));
assertFalse(wrapper.exists(dir));
assertTrue(wrapper.exists(dirNewViaLink));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a directory onto a symlink-to-directory must fail even with
 * OVERWRITE; both the directory and the link survive.
 */
@Test(timeout=10000) public void testRenameDirToSymlinkToDir() throws IOException {
Path dir1=new Path(testBaseDir1());
Path subDir=new Path(testBaseDir2(),"subDir");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(subDir,linkToDir,false);
try {
wrapper.rename(dir1,linkToDir,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// Only the broad IOException contract is asserted here.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToDir));
}
InternalCallVerifier BooleanVerifier
/**
 * lstat of a dangling symlink (target missing) still reports a symlink and
 * never a directory.
 */
@Test(timeout=10000) public void testStatDanglingLink() throws IOException {
// Target that was never created makes the link "dangling".
Path missingTarget=new Path("/noSuchFile");
Path danglingLink=new Path(testBaseDir1() + "/link");
wrapper.createSymlink(missingTarget,danglingLink,false);
FileStatus linkStat=wrapper.getFileLinkStatus(danglingLink);
assertFalse(linkStat.isDirectory());
assertTrue(linkStat.isSymlink());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stat of a symlink to a directory: followed stat reports a directory (and
 * not a symlink/file), lstat reports a symlink (and not a directory), and
 * getLinkTarget returns the original target.
 */
@Test(timeout=10000) public void testStatLinkToDir() throws IOException {
Path dir=new Path(testBaseDir1());
Path linkToDir=new Path(testBaseDir1() + "/linkToDir");
wrapper.createSymlink(dir,linkToDir,false);
assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
assertTrue(wrapper.isDir(linkToDir));
assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
assertFalse(wrapper.isFile(linkToDir));
// NOTE(review): duplicates the isDir assertion four lines above.
assertTrue(wrapper.isDir(linkToDir));
assertEquals(dir,wrapper.getLinkTarget(linkToDir));
}
InternalCallVerifier BooleanVerifier
/**
 * A symlink to a symlink-to-directory: a file reached through the double
 * indirection is a readable plain file, not a dir and not itself a link.
 */
@Test(timeout=10000) public void testCreateLinkToLink() throws IOException {
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path linkToLink=new Path(testBaseDir2(),"linkToLink");
// "file" reached through both levels of links.
Path fileViaLink=new Path(testBaseDir2(),"linkToLink/file");
createAndWriteFile(file);
wrapper.createSymlink(dir1,linkToDir,false);
wrapper.createSymlink(linkToDir,linkToLink,false);
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
readFile(fileViaLink);
}
InternalCallVerifier BooleanVerifier
/**
 * Renames a file addressed through a symlink to its parent directory: the
 * old name (via link and directly) disappears and the new name exists.
 */
@Test(timeout=10000) public void testRenameFileViaSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
Path fileNewViaLink=new Path(linkToDir,"fileNew");
createAndWriteFile(file);
wrapper.createSymlink(dir,linkToDir,false);
wrapper.rename(fileViaLink,fileNewViaLink);
// Rename through the link moved the real file.
assertFalse(wrapper.exists(fileViaLink));
assertFalse(wrapper.exists(file));
assertTrue(wrapper.exists(fileNewViaLink));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto a directory reached through a symlink must fail
 * with an IOException, and the source file must survive.
 */
@Test(timeout=10000) public void testRenameFileToDestViaSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path subDir=new Path(linkToDir,"subDir");
createAndWriteFile(file);
wrapper.createSymlink(dir,linkToDir,false);
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
try {
wrapper.rename(file,subDir);
fail("Renamed file to a directory");
}
catch ( IOException e) {
// Only the broad IOException contract is asserted here.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(file));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a file onto a symlink-to-directory: without OVERWRITE it fails
 * with FileAlreadyExistsException; with OVERWRITE the link is replaced by
 * the renamed file (no longer a symlink).
 */
@Test(timeout=10000) public void testRenameFileToSymlinkToDir() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path subDir=new Path(testBaseDir1(),"subDir");
Path link=new Path(testBaseDir1(),"link");
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(subDir,link,false);
createAndWriteFile(file);
try {
wrapper.rename(file,link);
fail("Renamed file to symlink w/o overwrite");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file,link,Rename.OVERWRITE);
// The link name now holds the plain file.
assertFalse(wrapper.exists(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Renaming a symlink onto an existing directory must fail: without
 * OVERWRITE, with OVERWRITE on an empty directory, and with OVERWRITE on
 * a non-empty directory (after subDir is added).
 */
@Test(timeout=10000) public void testRenameSymlinkToExistingDir() throws IOException {
Path dir1=new Path(testBaseDir1());
Path dir2=new Path(testBaseDir2());
Path subDir=new Path(testBaseDir2(),"subDir");
Path link=new Path(testBaseDir1(),"linkToDir");
wrapper.createSymlink(dir1,link,false);
try {
wrapper.rename(link,dir2);
fail("Renamed link to a directory");
}
catch ( IOException e) {
// Only the broad IOException contract is asserted here.
assertTrue(unwrapException(e) instanceof IOException);
}
try {
wrapper.rename(link,dir2,Rename.OVERWRITE);
fail("Renamed link to a directory");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof IOException);
}
// Make the destination non-empty; OVERWRITE must still fail.
wrapper.mkdir(subDir,FsPermission.getDefault(),false);
try {
wrapper.rename(link,dir2,Rename.OVERWRITE);
fail("Renamed link to a directory");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof IOException);
}
}
InternalCallVerifier BooleanVerifier
/**
 * Renames where the destination's parent is a symlink. When the parent
 * link points at a directory the rename works both ways; when it points at
 * a missing file, then at an existing file, the rename is expected to
 * surface FileNotFoundException / ParentNotDirectoryException respectively.
 */
@Test(timeout=10000) public void testRenameFileWithDestParentSymlink() throws IOException {
Path link=new Path(testBaseDir1(),"link");
Path file1=new Path(testBaseDir1(),"file1");
Path file2=new Path(testBaseDir1(),"file2");
Path file3=new Path(link,"file3");
Path dir2=new Path(testBaseDir2());
// Parent link -> directory: rename through it succeeds both directions.
wrapper.createSymlink(dir2,link,false);
createAndWriteFile(file1);
wrapper.rename(file1,file3);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(file3));
wrapper.rename(file3,file1);
// Re-point the parent link at a file that does not exist yet.
wrapper.delete(link,false);
wrapper.createSymlink(file2,link,false);
try {
wrapper.rename(file1,file3);
}
catch ( IOException e) {
// NOTE(review): no fail() before this catch -- only the exception type
// is pinned, not that an exception must occur; confirm intent.
assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
// Now the parent link resolves to an existing (non-directory) file.
createAndWriteFile(file2);
try {
wrapper.rename(file1,file3);
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof ParentNotDirectoryException);
}
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a symlink onto an existing file: without OVERWRITE it fails
 * with FileAlreadyExistsException; with OVERWRITE the destination becomes
 * the symlink, still pointing at the original target (checked only when
 * symlinks are not emulated on Windows).
 */
@Test(timeout=10000) public void testRenameSymlinkToExistingFile() throws IOException {
Path file1=new Path(testBaseDir1(),"file");
Path file2=new Path(testBaseDir1(),"someFile");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2,link,false);
try {
wrapper.rename(link,file1);
fail("Renamed w/o passing overwrite");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(link,file1,Rename.OVERWRITE);
assertFalse(wrapper.exists(link));
if (!emulatingSymlinksOnWindows()) {
// file1's name now carries the link, preserving the target.
assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
assertEquals(file2,wrapper.getLinkTarget(file1));
}
}
InternalCallVerifier BooleanVerifier
/**
 * mkdir through a symlink to the parent directory creates a real
 * directory; deleting through the link removes the real directory too.
 */
@Test(timeout=10000) public void testCreateDirViaSymlink() throws IOException {
Path dir1=new Path(testBaseDir1());
Path subDir=new Path(testBaseDir1(),"subDir");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path subDirViaLink=new Path(linkToDir,"subDir");
wrapper.createSymlink(dir1,linkToDir,false);
wrapper.mkdir(subDirViaLink,FileContext.DEFAULT_PERM,true);
assertTrue(wrapper.isDir(subDirViaLink));
wrapper.delete(subDirViaLink,false);
// Gone both via the link and at its real location.
assertFalse(wrapper.exists(subDirViaLink));
assertFalse(wrapper.exists(subDir));
}
InternalCallVerifier BooleanVerifier
/**
 * A file created through a symlink to its parent directory is a readable
 * plain file (not a dir, not a link) and is removed when deleted through
 * the link.
 */
@Test(timeout=10000) public void testCreateFileViaSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
wrapper.createSymlink(dir,linkToDir,false);
createAndWriteFile(fileViaLink);
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
readFile(fileViaLink);
wrapper.delete(fileViaLink,true);
assertFalse(wrapper.exists(fileViaLink));
}
InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Creates a symlink whose target path itself goes through a symlink: the
 * new link resolves to the file, reads correctly, reports the file's
 * length, and getLinkTarget returns the link-routed target path verbatim.
 * Skipped under Windows symlink emulation.
 */
@Test(timeout=10000) public void testCreateLinkViaLink() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
// The target of the second link is itself addressed through linkToDir.
Path fileViaLink=new Path(linkToDir,"file");
Path linkToFile=new Path(linkToDir,"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(dir1,linkToDir,false);
wrapper.createSymlink(fileViaLink,linkToFile,false);
assertTrue(wrapper.isFile(linkToFile));
assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink());
readFile(linkToFile);
assertEquals(fileSize,wrapper.getFileStatus(linkToFile).getLen());
assertEquals(fileViaLink,wrapper.getLinkTarget(linkToFile));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Full life cycle of a file accessed through a symlink to its parent
 * directory (absolute link target): create, stat, read, append, rename,
 * block-location and checksum agreement with the direct path, and delete.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path fileNew=new Path(baseDir,"fileNew");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
Path fileNewViaLink=new Path(linkToDir,"fileNew");
wrapper.createSymlink(baseDir,linkToDir,false);
// The file itself is created through the link.
createAndWriteFile(fileViaLink);
assertTrue(wrapper.exists(fileViaLink));
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.isDir(fileViaLink));
// For a plain file, lstat and followed stat agree on both paths.
assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink));
readFile(fileViaLink);
appendToFile(fileViaLink);
wrapper.rename(fileViaLink,fileNewViaLink);
assertFalse(wrapper.exists(fileViaLink));
assertTrue(wrapper.exists(fileNewViaLink));
readFile(fileNewViaLink);
// Direct path and link-routed path describe the same physical file.
assertEquals(wrapper.getFileBlockLocations(fileNew,0,1).length,wrapper.getFileBlockLocations(fileNewViaLink,0,1).length);
assertEquals(wrapper.getFileChecksum(fileNew),wrapper.getFileChecksum(fileNewViaLink));
wrapper.delete(fileNewViaLink,true);
assertFalse(wrapper.exists(fileNewViaLink));
}
BooleanVerifier
/**
 * The filesystem root must never be reported as a symlink by lstat.
 */
@Test(timeout=10000) public void testStatRoot() throws IOException {
Path root=new Path("/");
assertFalse(wrapper.getFileLinkStatus(root).isSymlink());
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Create a symlink whose target is partially qualified (scheme present,
 * authority missing) and verify how the target is stored and how
 * resolution fails.
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath1() throws IOException {
// Not meaningful for the local file system, which needs no authority.
assumeTrue(!"file".equals(getScheme()));
Path schemeAuth=new Path(testURI().toString());
// Target carries a scheme but no host/authority component.
Path fileWoHost=new Path(getScheme() + "://" + testBaseDir1()+ "/file");
Path link=new Path(testBaseDir1() + "/linkToFile");
Path linkQual=new Path(schemeAuth,testBaseDir1() + "/linkToFile");
FSTestWrapper localWrapper=wrapper.getLocalFSWrapper();
wrapper.createSymlink(fileWoHost,link,false);
// The link target is stored exactly as given, without qualification.
assertEquals(fileWoHost,wrapper.getLinkTarget(linkQual));
assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString());
assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(linkQual).getSymlink().toString());
if (wrapper instanceof FileContextTestWrapper) {
assertEquals(fileWoHost.toString(),localWrapper.getFileLinkStatus(linkQual).getSymlink().toString());
}
// Resolving the link must fail; the exception type depends on which
// API wrapper is under test.
try {
readFile(link);
fail("DFS requires URIs with schemes have an authority");
}
catch ( java.lang.RuntimeException e) {
// FileContext surfaces the missing authority as a RuntimeException.
assertTrue(wrapper instanceof FileContextTestWrapper);
}
catch ( FileNotFoundException e) {
// FileSystem surfaces it as a FileNotFoundException.
assertTrue(wrapper instanceof FileSystemTestWrapper);
GenericTestUtils.assertExceptionContains("File does not exist: /test1/file",e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes a file normally, then overwrites its data through the raw
 * (non-checksumming) filesystem so the stored checksum goes stale, and
 * verifies that a verified read fails with ChecksumException while an
 * unverified read returns the new data.
 */
@Test public void testCorruptedChecksum() throws Exception {
  Path testPath=new Path(TEST_ROOT_DIR,"testCorruptChecksum");
  Path checksumPath=localFs.getChecksumFile(testPath);
  // Write through the checksummed FS so a checksum file is created.
  FSDataOutputStream out=localFs.create(testPath,true);
  out.write("testing 1 2 3".getBytes());
  out.close();
  assertTrue(localFs.exists(checksumPath));
  FileStatus stat=localFs.getFileStatus(checksumPath);
  // Overwrite the data via the raw FS; the checksum file is left stale.
  out=localFs.getRawFileSystem().create(testPath,true);
  out.write("testing stale checksum".getBytes());
  out.close();
  // The raw write must not have touched the checksum file.
  assertTrue(localFs.exists(checksumPath));
  assertEquals(stat,localFs.getFileStatus(checksumPath));
  // With verification on, reading must surface a ChecksumException.
  Exception e=null;
  try {
    localFs.setVerifyChecksum(true);
    readFile(localFs,testPath,1024);
  }
  catch ( ChecksumException ce) {
    e=ce;
  }
  // Assert here rather than in a finally block: the original finally
  // would mask any unexpected exception thrown by readFile with an
  // AssertionError about 'e' being null.
  assertNotNull("should have seen a checksum error",e);
  // With verification off, the stale data is readable.
  localFs.setVerifyChecksum(false);
  String str=readFile(localFs,testPath,1024);
  assertEquals("testing stale checksum",str);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that open() wraps the stream in an FSInputChecker exactly
 * when checksum verification is enabled.
 */
@Test public void testStreamType() throws Exception {
  // An empty file is enough to exercise open()'s stream selection.
  final Path testPath = new Path(TEST_ROOT_DIR, "testStreamType");
  localFs.create(testPath).close();

  // Verification on: the wrapped stream is a checksumming reader.
  localFs.setVerifyChecksum(true);
  FSDataInputStream in = localFs.open(testPath);
  assertTrue("stream is input checker", in.getWrappedStream() instanceof FSInputChecker);

  // Verification off: a plain, non-checksumming stream is used.
  localFs.setVerifyChecksum(false);
  in = localFs.open(testPath);
  assertFalse("stream is not input checker", in.getWrappedStream() instanceof FSInputChecker);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test to ensure that if the checksum file is truncated, a
 * ChecksumException is thrown
 */
@Test public void testTruncatedChecksum() throws Exception {
  Path testPath=new Path(TEST_ROOT_DIR,"testtruncatedcrc");
  FSDataOutputStream fout=localFs.create(testPath);
  fout.write("testing truncation".getBytes());
  fout.close();
  // Read the checksum file through the raw FS and rewrite it one byte
  // short, producing a truncated crc file.
  Path checksumFile=localFs.getChecksumFile(testPath);
  FileSystem rawFs=localFs.getRawFileSystem();
  FSDataInputStream checksumStream=rawFs.open(checksumFile);
  byte buf[]=new byte[8192];
  int read=checksumStream.read(buf,0,buf.length);
  checksumStream.close();
  FSDataOutputStream replaceStream=rawFs.create(checksumFile);
  replaceStream.write(buf,0,read - 1);
  replaceStream.close();
  // A verified read must now fail on the truncated crc file.
  try {
    readFile(localFs,testPath,1024);
    fail("Did not throw a ChecksumException when reading truncated " + "crc file");
  }
  catch ( ChecksumException ignored) {
    // expected
  }
  // With verification off the data itself is still readable.
  // (readFile already returns a String; the old ".toString()" was redundant.)
  localFs.setVerifyChecksum(false);
  String str=readFile(localFs,testPath,1024);
  assertEquals("read","testing truncation",str);
}
BooleanVerifier
/**
 * Rename where the destination is a file nested inside a pre-created
 * directory.
 */
@Test public void testRenameFileIntoDirFile() throws Exception {
  final Path source = new Path(TEST_ROOT_DIR, "testRenameSrc");
  final Path destination = new Path(TEST_ROOT_DIR, "testRenameDir/testRenameDst");
  // Create the enclosing directory chain for the destination first.
  assertTrue(localFs.mkdirs(destination));
  verifyRename(source, destination, false);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies checksum handling end to end: verified reads at assorted
 * buffer sizes succeed on an intact file, a read with the wrong
 * checksum file fails, and an unverified read succeeds regardless.
 */
@Test public void testVerifyChecksum() throws Exception {
  Path testPath=new Path(TEST_ROOT_DIR,"testPath");
  Path testPath11=new Path(TEST_ROOT_DIR,"testPath11");
  // Two files with different contents, hence different checksum files.
  FSDataOutputStream fout=localFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  fout=localFs.create(testPath11);
  fout.write("testing you".getBytes());
  fout.close();
  // Verified reads at assorted buffer sizes (straddling likely chunk
  // boundaries) must all succeed on the intact file.
  readFile(localFs,testPath,128);
  readFile(localFs,testPath,511);
  readFile(localFs,testPath,512);
  readFile(localFs,testPath,513);
  readFile(localFs,testPath,1023);
  readFile(localFs,testPath,1024);
  readFile(localFs,testPath,1025);
  // Swap in the checksum file belonging to the other file.
  localFs.delete(localFs.getChecksumFile(testPath),true);
  assertFalse("checksum deleted",localFs.exists(localFs.getChecksumFile(testPath)));
  FileUtil.copy(localFs,localFs.getChecksumFile(testPath11),localFs,localFs.getChecksumFile(testPath),false,true,localFs.getConf());
  assertTrue("checksum exists",localFs.exists(localFs.getChecksumFile(testPath)));
  // A verified read must now fail on the mismatched checksum.
  boolean errorRead=false;
  try {
    readFile(localFs,testPath,1024);
  }
  catch ( ChecksumException ie) {
    errorRead=true;
  }
  assertTrue("error reading",errorRead);
  // With verification off, the original data is still readable.
  // (readFile already returns a String; the old ".toString()" was redundant.)
  localFs.setVerifyChecksum(false);
  String str=readFile(localFs,testPath,1024);
  assertEquals("read","testing",str);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * DF.getMount() for the working directory must name an existing
 * directory that is a path prefix of the working directory.
 */
@Test(timeout=5000) public void testGetMountCurrentDirectory() throws Exception {
  // Resolve the JVM's working directory to a canonical absolute path.
  final String workingDir = new File(".").getAbsoluteFile().getCanonicalPath();
  final DF df = new DF(new File(workingDir), 0L);
  final String mountPath = df.getMount();
  final File mountDir = new File(mountPath);
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should exist.", mountDir.exists());
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should be directory.", mountDir.isDirectory());
  assertTrue("Working dir [" + workingDir + "] should start with [" + mountPath + "].", workingDir.startsWith(mountPath));
}
TestCleaner BooleanVerifier HybridVerifier
/** Cleans up the test root after each test; the directory must be fully removed. */
@After public void after() throws IOException {
  // Restore write permission first so the recursive delete can succeed.
  FileUtil.setWritable(test_root,true);
  FileUtil.fullyDelete(test_root);
  // assertFalse reports the failing condition more clearly than assertTrue(!...).
  assertFalse(test_root.exists());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Exercises the short-circuit read mmap cache: zero-copy reads create
 * mmapped replicas, releasing the buffers and closing the stream makes
 * them evictable, and the cache eventually drains its mmaps.
 */
@Test public void testZeroCopyMmapCache() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
final int TEST_FILE_LENGTH=16385;
final int RANDOM_SEED=23453;
final String CONTEXT="testZeroCopyMmapCacheContext";
FSDataInputStream fsIn=null;
ByteBuffer results[]={null,null,null,null};
DistributedFileSystem fs=null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
// Ordinary (copying) read first, then reopen for the zero-copy reads.
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
fsIn=fs.open(TEST_PATH);
final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache();
// Check the cache counters before any zero-copy read (see CountingVisitor
// for the meaning of the four expected values).
cache.accept(new CountingVisitor(0,5,5,0));
results[0]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.seek(0);
results[1]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
// The replica for the first block must now hold an mmap and must not
// be evictable while zero-copy buffers are still outstanding.
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
ShortCircuitReplica replica=replicas.get(new ExtendedBlockId(firstBlock.getBlockId(),firstBlock.getBlockPoolId()));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.hasMmap());
Assert.assertNull(replica.getEvictableTimeNs());
}
}
);
results[2]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
results[3]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
cache.accept(new CountingVisitor(3,5,2,0));
// Release every zero-copy buffer that was handed out, then close.
for ( ByteBuffer buffer : results) {
if (buffer != null) {
fsIn.releaseBuffer(buffer);
}
}
fsIn.close();
// Poll (every 10ms, up to 60s) until the mmapped-evictable set drains.
GenericTestUtils.waitFor(new Supplier(){
public Boolean get(){
final MutableBoolean finished=new MutableBoolean(false);
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
finished.setValue(evictableMmapped.isEmpty());
}
}
);
return finished.booleanValue();
}
}
,10,60000);
// -1 arguments mean "don't check" for that counter; only outstanding
// mmaps are asserted to be zero at the end.
cache.accept(new CountingVisitor(0,-1,-1,-1));
fs.close();
cluster.shutdown();
}
InternalCallVerifier BooleanVerifier
/**
 * Remove the target directory after the getListing RPC
 */
@Test public void testTargetDeletionForListLocatedStatus() throws Exception {
  LOG.info("Test Target Delete For listLocatedStatus");
  // Advancing the iterator once triggers the deletion hook; afterwards
  // it must report that nothing else is available.
  final RemoteIterator iterator = fs.listLocatedStatus(TEST_PATH);
  iterator.next();
  assertFalse(iterator.hasNext());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename test where both src and dst are files
 */
@Test public void testDeletionOfDstFile() throws Exception {
Path src=getTestPath("testDeletionOfDstFile/dir/src");
Path dst=getTestPath("testDeletionOfDstFile/newdir/dst");
createFile(src);
createFile(dst);
final FSNamesystem namesystem=cluster.getNamesystem();
final long blocks=namesystem.getBlocksTotal();
final long fileCount=namesystem.getFilesTotal();
// Rename with OVERWRITE: the overwritten dst must give back exactly
// one block and one inode.
rename(src,dst,false,false,true,Rename.OVERWRITE);
Assert.assertEquals(blocks - 1,namesystem.getBlocksTotal());
Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
// Restart the cluster to check that the rename persisted.
restartCluster(false);
int count=0;
boolean exception=true;
src=getTestPath("testDeletionOfDstFile/dir/src");
dst=getTestPath("testDeletionOfDstFile/newdir/dst");
// Retry up to 5 times with a 1s backoff: the namenode may not be
// fully serving immediately after the restart.
while (exception && count < 5) {
try {
exists(fc,src);
exception=false;
}
catch ( Exception e) {
LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
Thread.sleep(1000);
count++;
}
}
// After the restart, the rename result must still hold.
Assert.assertFalse(exists(fc,src));
Assert.assertTrue(exists(fc,dst));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename test where both src and dst are directories
 */
@Test public void testDeletionOfDstDirectory() throws Exception {
Path src=getTestPath("testDeletionOfDstDirectory/dir/src");
Path dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
fc.mkdir(dst,FileContext.DEFAULT_PERM,true);
FSNamesystem namesystem=cluster.getNamesystem();
long fileCount=namesystem.getFilesTotal();
// Rename with OVERWRITE: the overwritten dst directory must give
// back exactly one inode.
rename(src,dst,false,false,true,Rename.OVERWRITE);
Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
// Restart the cluster to check that the rename persisted.
restartCluster(false);
src=getTestPath("testDeletionOfDstDirectory/dir/src");
dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
int count=0;
boolean exception=true;
// Retry up to 5 times with a 1s backoff: the namenode may not be
// fully serving immediately after the restart.
while (exception && count < 5) {
try {
exists(fc,src);
exception=false;
}
catch ( Exception e) {
LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
Thread.sleep(1000);
count++;
}
}
// After the restart, the rename result must still hold.
Assert.assertFalse(exists(fc,src));
Assert.assertTrue(exists(fc,dst));
}
InternalCallVerifier BooleanVerifier
/**
 * Registers several paths for delete-on-exit with a FileContext,
 * verifies the shutdown hook is installed and the bookkeeping tracks
 * them, then runs the finalizer and checks everything was deleted.
 */
@Test public void testDeleteOnExit() throws Exception {
Path file1=helper.getTestRootPath(fc,"file1");
createFile(fc,file1,numBlocks,blockSize);
fc.deleteOnExit(file1);
checkDeleteOnExitData(1,fc,file1);
// Registering a path must install the FileContext finalizer hook.
Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
Path file2=helper.getTestRootPath(fc,"dir1/file2");
createFile(fc,file2,numBlocks,blockSize);
fc.deleteOnExit(file2);
checkDeleteOnExitData(1,fc,file1,file2);
Path dir=helper.getTestRootPath(fc,"dir3/dir4/dir5/dir6");
createFile(fc,dir,numBlocks,blockSize);
fc.deleteOnExit(dir);
checkDeleteOnExitData(1,fc,file1,file2,dir);
// Running the finalizer directly must delete every registered path
// and clear the delete-on-exit bookkeeping.
FileContext.FINALIZER.run();
checkDeleteOnExitData(0,fc,new Path[0]);
Assert.assertFalse(exists(fc,file1));
Assert.assertFalse(exists(fc,file2));
Assert.assertFalse(exists(fc,dir));
}
InternalCallVerifier BooleanVerifier
/**
 * Check that FileStatus are not equal if their paths are not equal.
 */
@Test public void testNotEquals(){
  final FsPermission perm = FsPermission.valueOf("-rw-rw-rw-");
  // Identical in every field except the path.
  final FileStatus statusA =
      new FileStatus(1, true, 1, 1, 1, 1, perm, "one", "one", null, new Path("path1"));
  final FileStatus statusB =
      new FileStatus(1, true, 1, 1, 1, 1, perm, "one", "one", null, new Path("path2"));
  // Inequality must hold in both directions (symmetry of equals()).
  assertFalse(statusA.equals(statusB));
  assertFalse(statusB.equals(statusA));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * FileSystem.get() must return the cached instance, while
 * FileSystem.newInstance() must always create distinct instances.
 */
@Test public void testFsUniqueness() throws Exception {
  final Configuration conf=new Configuration();
  conf.set("fs.cachedfile.impl",FileSystem.getFileSystemClass("file",null).getName());
  // get() hands back the cached instance for the same conf.
  FileSystem fs1=FileSystem.get(conf);
  FileSystem fs2=FileSystem.get(conf);
  assertSame(fs1,fs2);
  // newInstance() must create fresh, non-equal instances every time.
  fs1=FileSystem.newInstance(new URI("cachedfile://a"),conf,"bar");
  fs2=FileSystem.newInstance(new URI("cachedfile://a"),conf,"bar");
  try {
    assertNotSame(fs1,fs2);
    assertFalse(fs1.equals(fs2));
  } finally {
    // Close even if an assertion fails so the instances are not leaked.
    fs1.close();
    fs2.close();
  }
}
BooleanVerifier
/**
 * deleteOnExit must return false when the path cannot be registered,
 * and close() must then neither stat nor delete anything.
 */
@Test public void testDeleteOnExitFNF() throws IOException {
  final FileSystem underlying = mock(FileSystem.class);
  final FileSystem filtered = new FilterFileSystem(underlying);
  final Path path = new Path("/a");
  // The unstubbed mock provides no file status, so registration fails.
  assertFalse(filtered.deleteOnExit(path));
  verify(underlying).getFileStatus(eq(path));
  reset(underlying);
  // Nothing was registered, so close() must not touch the path at all.
  filtered.close();
  verify(underlying, never()).getFileStatus(eq(path));
  verify(underlying, never()).delete(any(Path.class), anyBoolean());
}
BooleanVerifier
/**
 * A successfully registered delete-on-exit path must be re-checked and
 * deleted recursively when the filesystem is closed.
 */
@Test public void testDeleteOnExit() throws IOException {
  final FileSystem underlying = mock(FileSystem.class);
  final FileSystem filtered = new FilterFileSystem(underlying);
  final Path path = new Path("/a");
  // Stub a status so registration succeeds.
  when(underlying.getFileStatus(eq(path))).thenReturn(new FileStatus());
  assertTrue(filtered.deleteOnExit(path));
  verify(underlying).getFileStatus(eq(path));
  reset(underlying);
  // Re-stub after the reset; close() checks the path again and deletes it.
  when(underlying.getFileStatus(eq(path))).thenReturn(new FileStatus());
  filtered.close();
  verify(underlying).getFileStatus(eq(path));
  verify(underlying).delete(eq(path), eq(true));
}
BooleanVerifier
/**
 * If the registered path no longer exists at close() time, no delete
 * call must be issued against the underlying filesystem.
 */
@Test public void testDeleteOnExitRemoved() throws IOException {
  final FileSystem underlying = mock(FileSystem.class);
  final FileSystem filtered = new FilterFileSystem(underlying);
  final Path path = new Path("/a");
  when(underlying.getFileStatus(eq(path))).thenReturn(new FileStatus());
  assertTrue(filtered.deleteOnExit(path));
  verify(underlying).getFileStatus(eq(path));
  reset(underlying);
  // After the reset the mock no longer reports a status: close() still
  // re-checks the path but must not attempt any deletion.
  filtered.close();
  verify(underlying).getFileStatus(eq(path));
  verify(underlying, never()).delete(any(Path.class), anyBoolean());
}
InternalCallVerifier BooleanVerifier
/**
 * Cancelling a delete-on-exit registration must prevent any stat or
 * delete at close() time; a second cancel must report nothing to cancel.
 */
@Test public void testCancelDeleteOnExit() throws IOException {
  final FileSystem underlying = mock(FileSystem.class);
  final FileSystem filtered = new FilterFileSystem(underlying);
  final Path path = new Path("/a");
  when(underlying.getFileStatus(eq(path))).thenReturn(new FileStatus());
  assertTrue(filtered.deleteOnExit(path));
  verify(underlying).getFileStatus(eq(path));
  // First cancel succeeds; the second finds nothing registered.
  assertTrue(filtered.cancelDeleteOnExit(path));
  assertFalse(filtered.cancelDeleteOnExit(path));
  reset(underlying);
  // Nothing left registered: close() must not stat or delete anything.
  filtered.close();
  verify(underlying, never()).getFileStatus(any(Path.class));
  verify(underlying, never()).delete(any(Path.class), anyBoolean());
}
BooleanVerifier
/**
 * Check if FileSystem can be properly initialized if URLStreamHandlerFactory
 * is registered.
 */
@Test public void testInitializationWithRegisteredStreamFactory(){
  Configuration conf=new Configuration();
  URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(conf));
  try {
    FileSystem.getFileSystemClass("file",conf);
  }
  catch ( IOException e) {
    // The original catch body was assertFalse(false) — a no-op that let
    // an initialization failure pass silently. Fail explicitly instead.
    fail("FileSystem initialization failed with registered stream factory: " + e);
  }
}
BooleanVerifier
/**
 * Test that deletion of a symlink works as expected.
 */
@Test(timeout=30000) public void testSymlinkDelete() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  final File target = new File(del, FILE);
  target.createNewFile();
  final File symlink = new File(del, "_link");
  FileUtil.symLink(target.getAbsolutePath(), symlink.getAbsolutePath());
  Assert.assertTrue(target.exists());
  Assert.assertTrue(symlink.exists());
  // Deleting the link must remove only the link, never its target.
  Assert.assertTrue(symlink.delete());
  Assert.assertFalse(symlink.exists());
  Assert.assertTrue(target.exists());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.list(): counts entries of an existing directory, returns an
 * empty array for a fresh directory, and throws IOException for a
 * missing one.
 */
@Test(timeout=30000) public void testListAPI() throws IOException {
  setupDirs();
  // Pre-populated directory.
  Assert.assertEquals("Unexpected number of pre-existing files", 2, FileUtil.list(partitioned).length);
  // A freshly created directory lists as empty.
  final File newDir = new File(tmp.getPath(), "test");
  newDir.mkdir();
  Assert.assertTrue("Failed to create test dir", newDir.exists());
  Assert.assertEquals("New directory unexpectedly contains files", 0, FileUtil.list(newDir).length);
  // Listing a deleted directory must raise an IOException.
  newDir.delete();
  Assert.assertFalse("Failed to delete test dir", newDir.exists());
  try {
    FileUtil.list(newDir);
    Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString());
  } catch (IOException expected) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * fullyDelete must report success, remove the whole tree, and leave the
 * sibling tmp directory intact (checked by validateTmpDir).
 */
@Test(timeout=30000) public void testFullyDelete() throws IOException {
  setupDirs();
  final boolean deleted = FileUtil.fullyDelete(del);
  Assert.assertTrue(deleted);
  Assert.assertFalse(del.exists());
  validateTmpDir();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) symlink to file only and not the file pointed to by symlink.
 * (b) symlink to dir only and not the dir pointed to by symlink.
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException {
setupDirs();
File link=new File(del,LINK);
// setupDirs leaves 5 entries in 'del'.
Assert.assertEquals(5,del.list().length);
// Deleting the file symlink removes one entry from 'del'...
boolean ret=FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertFalse(link.exists());
Assert.assertEquals(4,del.list().length);
// ...while the linked-to content stays valid.
validateTmpDir();
// Same for a symlink that points at a directory.
File linkDir=new File(del,"tmpDir");
ret=FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertFalse(linkDir.exists());
Assert.assertEquals(3,del.list().length);
validateTmpDir();
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Builds a one-entry zip ("foo" -> "some-content"), verifies unZip
 * extracts it correctly, and checks that unzipping onto a regular file
 * fails with IOException.
 */
@Test(timeout=30000) public void testUnZip() throws IOException {
  setupDirs();
  final File simpleZip=new File(del,FILE);
  OutputStream os=new FileOutputStream(simpleZip);
  ZipOutputStream tos=new ZipOutputStream(os);
  try {
    ZipEntry ze=new ZipEntry("foo");
    byte[] data="some-content".getBytes("UTF-8");
    ze.setSize(data.length);
    tos.putNextEntry(ze);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  }
  finally {
    tos.close();
  }
  // Extract into tmp and verify the entry's presence and length.
  FileUtil.unZip(simpleZip,tmp);
  assertTrue(new File(tmp,"foo").exists());
  assertEquals(12,new File(tmp,"foo").length());
  // Unzipping onto a regular file (not a directory) must fail.
  final File regularFile=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unZip(simpleZip,regularFile);
    // fail() states the intent directly; assertTrue("...", false) did not.
    fail("An IOException expected.");
  }
  catch ( IOException ioe) {
    // expected
  }
}
BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * copyMerge of the partitioned directory must produce a single file
 * whose lines are the concatenated partition contents.
 */
@Test(timeout=30000) public void testCopyMergeSingleDirectory() throws IOException {
  setupDirs();
  final boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
  Assert.assertTrue("Expected successful copyMerge result.", copyMergeResult);
  final File merged = new File(TEST_DIR, "tmp/merged");
  Assert.assertTrue("File tmp/merged must exist after copyMerge.", merged.exists());
  // Read the merged output line by line and check content and EOF.
  final BufferedReader reader = new BufferedReader(new FileReader(merged));
  try {
    Assert.assertEquals("Line 1 of merged file must contain \"foo\".", "foo", reader.readLine());
    Assert.assertEquals("Line 2 of merged file must contain \"bar\".", "bar", reader.readLine());
    Assert.assertNull("Expected end of file reading merged file.", reader.readLine());
  } finally {
    reader.close();
  }
}
BooleanVerifier
/**
 * FileUtil.replaceFile: moves src onto a missing target, overwrites an
 * existing target file, and fails (leaving everything intact) when the
 * target is a non-empty directory.
 */
@Test(timeout=30000) public void testReplaceFile() throws IOException {
  setupDirs();
  // Case 1: target does not exist — src is moved into place.
  final File srcFile=new File(tmp,"src");
  srcFile.createNewFile();
  assertTrue(srcFile.exists());
  final File targetFile=new File(tmp,"target");
  assertFalse(targetFile.exists());
  FileUtil.replaceFile(srcFile,targetFile);
  assertFalse(srcFile.exists());
  assertTrue(targetFile.exists());
  // Case 2: target exists as a file — it is overwritten.
  srcFile.createNewFile();
  assertTrue(srcFile.exists());
  FileUtil.replaceFile(srcFile,targetFile);
  assertFalse(srcFile.exists());
  assertTrue(targetFile.exists());
  // Case 3: target is a non-empty directory — the call must throw and
  // leave src, the directory and its contents untouched.
  srcFile.createNewFile();
  assertTrue(srcFile.exists());
  targetFile.delete();
  targetFile.mkdirs();
  File obstacle=new File(targetFile,"obstacle");
  obstacle.createNewFile();
  assertTrue(obstacle.exists());
  assertTrue(targetFile.exists() && targetFile.isDirectory());
  try {
    FileUtil.replaceFile(srcFile,targetFile);
    // fail() states the intent directly; assertTrue(false) did not.
    fail("Expected IOException when target is a non-empty directory");
  }
  catch ( IOException ioe) {
    // expected
  }
  assertTrue(srcFile.exists());
  assertTrue(targetFile.exists() && targetFile.isDirectory());
  assertTrue(obstacle.exists());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.stat2Paths(stats, defaultPath): null stats yield the default
 * path (or a single null when both are null); otherwise one Path per
 * FileStatus.
 */
@Test(timeout=30000) public void testStat2Paths2(){
  // Null stats array: the default path is returned as the only element.
  Path defaultPath=new Path("file://default");
  Path[] paths=FileUtil.stat2Paths(null,defaultPath);
  assertEquals(1,paths.length);
  assertEquals(defaultPath,paths[0]);
  // Null stats and null default: a one-element array holding null.
  paths=FileUtil.stat2Paths(null,null);
  assertNotNull(paths);
  assertEquals(1,paths.length);
  assertNull(paths[0]);
  // Non-null stats: one path per FileStatus; the default is ignored.
  Path path1=new Path("file://foo");
  Path path2=new Path("file://moo");
  FileStatus[] fileStatuses=new FileStatus[]{new FileStatus(3,false,0,0,0,path1),new FileStatus(3,false,0,0,0,path2)};
  paths=FileUtil.stat2Paths(fileStatuses,defaultPath);
  assertEquals(2,paths.length);
  // Expected value goes first so failure messages read correctly.
  assertEquals(path1,paths[0]);
  assertEquals(path2,paths[1]);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A symlink must expose the target's length and content: writes the
 * payload to a file, links to it, and reads the bytes back through the
 * link.
 */
@Test(timeout=30000) public void testSymlink() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  byte[] data="testSymLink".getBytes();
  File file=new File(del,FILE);
  File link=new File(del,"_link");
  // Write the payload to the real file.
  FileOutputStream os=new FileOutputStream(file);
  os.write(data);
  os.close();
  FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath());
  // Both the file and the link must report the payload length.
  Assert.assertEquals(data.length,file.length());
  Assert.assertEquals(data.length,link.length());
  // Count bytes readable through the link. Use the EOF sentinel -1:
  // the previous "read() > 0" loop would have stopped early on a zero
  // byte (none occur in this payload, but the idiom was wrong).
  FileInputStream in=new FileInputStream(link);
  long len=0;
  try {
    while (in.read() != -1) {
      len++;
    }
  } finally {
    // Close even if the read loop throws.
    in.close();
  }
  Assert.assertEquals(data.length,len);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * fullyDeleteContents must empty the directory while keeping the
 * directory itself, leaving the sibling tmp directory valid.
 */
@Test(timeout=30000) public void testFullyDeleteContents() throws IOException {
  setupDirs();
  final boolean deleted = FileUtil.fullyDeleteContents(del);
  Assert.assertTrue(deleted);
  Assert.assertTrue(del.exists());
  Assert.assertEquals(0, del.listFiles().length);
  validateTmpDir();
}
BooleanVerifier
/**
 * Test that rename on a symlink works as expected.
 */
@Test(timeout=30000) public void testSymlinkRenameTo() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  final File target = new File(del, FILE);
  target.createNewFile();
  final File symlink = new File(del, "_link");
  FileUtil.symLink(target.getAbsolutePath(), symlink.getAbsolutePath());
  Assert.assertTrue(target.exists());
  Assert.assertTrue(symlink.exists());
  // Renaming must move only the link; the target stays where it is.
  final File renamedLink = new File(del, "_link2");
  Assert.assertTrue(symlink.renameTo(renamedLink));
  Assert.assertTrue(target.exists());
  Assert.assertTrue(renamedLink.exists());
  Assert.assertFalse(symlink.exists());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.copy from a FileSystem path to a local File: plain copy,
 * copy with deleteSource, and directory copy with deleteSource.
 */
@Test(timeout=30000) public void testCopy5() throws IOException {
  setupDirs();
  URI uri=tmp.toURI();
  Configuration conf=new Configuration();
  FileSystem fs=FileSystem.newInstance(uri,conf);
  final String content="some-content";
  File srcFile=createFile(tmp,"src",content);
  Path srcPath=new Path(srcFile.toURI());
  final File dest=new File(del,"dest");
  // Each copied line gains a trailing platform line separator.
  final int sepLen=System.getProperty("line.separator").getBytes().length;
  // Copy without deleting the source.
  boolean result=FileUtil.copy(fs,srcPath,dest,false,conf);
  assertTrue(result);
  assertTrue(dest.exists());
  assertEquals(content.getBytes().length + sepLen,dest.length());
  assertTrue(srcFile.exists());
  dest.delete();
  assertFalse(dest.exists());
  // Copy with deleteSource=true: the source file must be removed.
  result=FileUtil.copy(fs,srcPath,dest,true,conf);
  assertTrue(result);
  assertTrue(dest.exists());
  assertEquals(content.getBytes().length + sepLen,dest.length());
  assertFalse(srcFile.exists());
  dest.delete();
  assertFalse(dest.exists());
  // Directory copy with deleteSource=true moves the whole tree.
  srcPath=new Path(partitioned.toURI());
  result=FileUtil.copy(fs,srcPath,dest,true,conf);
  assertTrue(result);
  assertTrue(dest.exists() && dest.isDirectory());
  File[] files=dest.listFiles();
  assertNotNull(files);
  assertEquals(2,files.length);
  for ( File f : files) {
    // Each partition holds 3 bytes of content plus the separator.
    assertEquals(3 + sepLen,f.length());
  }
  assertFalse(partitioned.exists());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) dangling symlink to file properly
 * (b) dangling symlink to directory properly
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteDanglingSymlinks() throws IOException {
setupDirs();
// Delete the link targets first so both symlinks in 'del' dangle.
boolean ret=FileUtil.fullyDelete(tmp);
Assert.assertTrue(ret);
Assert.assertFalse(tmp.exists());
File link=new File(del,LINK);
// setupDirs leaves 5 entries in 'del'.
Assert.assertEquals(5,del.list().length);
// The dangling file symlink itself must still be deletable.
ret=FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertEquals(4,del.list().length);
// Same for the dangling directory symlink.
File linkDir=new File(del,"tmpDir");
ret=FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertEquals(3,del.list().length);
}
BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that length on a symlink works as expected.
 */
@Test(timeout=30000) public void testSymlinkLength() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
byte[] data="testSymLinkData".getBytes();
File file=new File(del,FILE);
File link=new File(del,"_link");
FileOutputStream os=new FileOutputStream(file);
os.write(data);
os.close();
// Before the link is created, its length reads as 0.
Assert.assertEquals(0,link.length());
FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath());
// A link's length reports the target's length.
Assert.assertEquals(data.length,file.length());
Assert.assertEquals(data.length,link.length());
file.delete();
Assert.assertFalse(file.exists());
if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
// On pre-Java7 Windows the link still reports the data length even
// after the target is gone.
Assert.assertEquals(data.length,link.length());
}
else {
// Elsewhere a dangling link reads as length 0.
Assert.assertEquals(0,link.length());
}
link.delete();
Assert.assertFalse(link.exists());
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that getDU is able to handle cycles caused due to symbolic links
 * and that directory sizes are not added to the final calculated size
 * @throws IOException
 */
@Test(timeout=30000) public void testGetDU() throws Exception {
setupDirs();
long du=FileUtil.getDU(TEST_DIR);
// Two 3-byte files, each followed by a platform line separator.
final long expected=2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected,du);
// A nonexistent path has zero disk usage.
final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist=FileUtil.getDU(doesNotExist);
assertEquals(0,duDoesNotExist);
// A plain file's usage is its own length.
File notADirectory=new File(partitioned,"part-r-00000");
long duNotADirectoryActual=FileUtil.getDU(notADirectory);
long duNotADirectoryExpected=3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected,duNotADirectoryActual);
try {
// An unreadable file still counts toward its parent's usage.
try {
FileUtil.chmod(notADirectory.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
assertNull(ie);
}
assertFalse(FileUtil.canRead(notADirectory));
final long du3=FileUtil.getDU(partitioned);
assertEquals(expected,du3);
// An unreadable directory contributes zero.
try {
FileUtil.chmod(partitioned.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
assertNull(ie);
}
assertFalse(FileUtil.canRead(partitioned));
final long du4=FileUtil.getDU(partitioned);
assertEquals(0,du4);
}
finally {
// Always restore permissions so later cleanup can delete the tree.
FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true);
}
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.listFiles(): counts entries of an existing directory,
 * returns an empty array for a fresh directory, and throws IOException
 * for a missing one.
 */
@Test(timeout=30000) public void testListFiles() throws IOException {
  setupDirs();
  // Pre-populated directory contains exactly two files.
  Assert.assertEquals(2, FileUtil.listFiles(partitioned).length);
  // A freshly created directory lists as empty.
  final File newDir = new File(tmp.getPath(), "test");
  newDir.mkdir();
  Assert.assertTrue("Failed to create test dir", newDir.exists());
  Assert.assertEquals(0, FileUtil.listFiles(newDir).length);
  // Listing a deleted directory must raise an IOException.
  newDir.delete();
  Assert.assertFalse("Failed to delete test dir", newDir.exists());
  try {
    FileUtil.listFiles(newDir);
    Assert.fail("IOException expected on listFiles() for non-existent dir " + newDir.toString());
  } catch (IOException expected) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * createLocalTempFile must create readable/writable temp files distinct
 * from the base file, for both values of the third argument.
 */
@Test(timeout=30000) public void testCreateLocalTempFile() throws IOException {
  setupDirs();
  final File baseFile = new File(tmp, "base");
  final File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
  final File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
  // Temp files must be distinct from the base file...
  assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
  assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
  // ...and exist with both read and write access.
  assertTrue(tmp1.exists());
  assertTrue(tmp2.exists());
  assertTrue(tmp1.canWrite());
  assertTrue(tmp2.canWrite());
  assertTrue(tmp1.canRead());
  assertTrue(tmp2.canRead());
  tmp1.delete();
  tmp2.delete();
  assertFalse(tmp1.exists());
  assertFalse(tmp2.exists());
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Builds a one-entry tar ("foo" -> "some-content"), verifies unTar
 * extracts it correctly, and checks that untarring onto a regular file
 * fails with IOException.
 */
@Test(timeout=30000) public void testUnTar() throws IOException {
  setupDirs();
  final File simpleTar=new File(del,FILE);
  OutputStream os=new FileOutputStream(simpleTar);
  TarOutputStream tos=new TarOutputStream(os);
  try {
    TarEntry te=new TarEntry("foo");
    byte[] data="some-content".getBytes("UTF-8");
    te.setSize(data.length);
    tos.putNextEntry(te);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  }
  finally {
    tos.close();
  }
  // Extract into tmp and verify the entry's presence and length.
  FileUtil.unTar(simpleTar,tmp);
  assertTrue(new File(tmp,"foo").exists());
  assertEquals(12,new File(tmp,"foo").length());
  // Untarring onto a regular file (not a directory) must fail.
  final File regularFile=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unTar(simpleTar,regularFile);
    // fail() states the intent directly; assertTrue("...", false) did not.
    fail("An IOException expected.");
  }
  catch ( IOException ioe) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * createJarWithClassPath must produce a manifest-only jar whose
 * Class-Path attribute contains: each explicit entry, each file matched
 * by a wildcard entry (as a URL), and trailing-slash URLs for directory
 * entries — even for a not-yet-existing subdirectory.
 */
@Test(timeout=30000) public void testCreateJarWithClassPath() throws Exception {
Assert.assertFalse(tmp.exists());
Assert.assertTrue(tmp.mkdirs());
// Jars that the "*" wildcard entry is expected to match (both cases).
List wildcardMatches=Arrays.asList(new File(tmp,"wildcard1.jar"),new File(tmp,"wildcard2.jar"),new File(tmp,"wildcard3.JAR"),new File(tmp,"wildcard4.JAR"));
for ( File wildcardMatch : wildcardMatches) {
Assert.assertTrue("failure creating file: " + wildcardMatch,wildcardMatch.createNewFile());
}
// Non-jar files that must NOT be picked up by the wildcard.
Assert.assertTrue(new File(tmp,"text.txt").createNewFile());
Assert.assertTrue(new File(tmp,"executable.exe").createNewFile());
Assert.assertTrue(new File(tmp,"README").createNewFile());
String wildcardPath=tmp.getCanonicalPath() + File.separator + "*";
// Directory entry (trailing separator) that does not exist yet.
String nonExistentSubdir=tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"+ Path.SEPARATOR;
List classPaths=Arrays.asList("","cp1.jar","cp2.jar",wildcardPath,"cp3.jar",nonExistentSubdir);
String inputClassPath=StringUtils.join(File.pathSeparator,classPaths);
String classPathJar=FileUtil.createJarWithClassPath(inputClassPath,new Path(tmp.getCanonicalPath()),System.getenv());
JarFile jarFile=null;
try {
// The generated jar must carry a manifest with a Class-Path attribute.
jarFile=new JarFile(classPathJar);
Manifest jarManifest=jarFile.getManifest();
Assert.assertNotNull(jarManifest);
Attributes mainAttributes=jarManifest.getMainAttributes();
Assert.assertNotNull(mainAttributes);
Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
String classPathAttr=mainAttributes.getValue(Attributes.Name.CLASS_PATH);
Assert.assertNotNull(classPathAttr);
// Build the expected Class-Path entries from the inputs.
List expectedClassPaths=new ArrayList();
for ( String classPath : classPaths) {
// Empty entries are dropped.
if (classPath.length() == 0) {
continue;
}
if (wildcardPath.equals(classPath)) {
// The wildcard expands to a URL per matching jar file.
for ( File wildcardMatch : wildcardMatches) {
expectedClassPaths.add(wildcardMatch.toURI().toURL().toExternalForm());
}
}
else {
// Relative entries are resolved against the working dir (tmp).
File fileCp=null;
if (!new Path(classPath).isAbsolute()) {
fileCp=new File(tmp,classPath);
}
else {
fileCp=new File(classPath);
}
if (nonExistentSubdir.equals(classPath)) {
// Directory entries keep a trailing separator in the manifest.
expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm() + Path.SEPARATOR);
}
else {
expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm());
}
}
}
// Compare order-insensitively: sort both lists first.
List actualClassPaths=Arrays.asList(classPathAttr.split(" "));
Collections.sort(expectedClassPaths);
Collections.sort(actualClassPaths);
Assert.assertEquals(expectedClassPaths,actualClassPaths);
}
finally {
if (jarFile != null) {
try {
jarFile.close();
}
catch ( IOException e) {
LOG.warn("exception closing jarFile: " + classPathJar,e);
}
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies -put behavior when the destination name denotes a directory:
 * a trailing "/" or "/." on a nonexistent destination must fail, the same
 * suffixes on an existing directory must copy into it, and a "/foo/.."
 * destination resolves to the parent directory.
 */
@Test public void testRepresentsDir() throws Exception {
Path subdirDstPath=new Path(dstPath,srcPath.getName());
String argv[]=null;
lfs.delete(dstPath,true);
assertFalse(lfs.exists(dstPath));
// Plain -put to a nonexistent destination creates it as a file.
argv=new String[]{"-put",srcPath.toString(),dstPath.toString()};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath));
lfs.delete(dstPath,true);
assertFalse(lfs.exists(dstPath));
lfs.delete(dstPath,true);
// A trailing "/" or "/." implies a directory; with no such directory the
// put must fail and create nothing.
for ( String suffix : new String[]{"/","/."}) {
argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix};
assertEquals(1,shell.run(argv));
assertFalse(lfs.exists(dstPath));
assertFalse(lfs.exists(subdirDstPath));
}
// With an existing directory the same suffixes copy the file into it.
for ( String suffix : new String[]{"/","/."}) {
lfs.delete(dstPath,true);
lfs.mkdirs(dstPath);
argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
// "dst/foo/.." must resolve back to dst itself.
String dotdotDst=dstPath + "/foo/..";
lfs.delete(dstPath,true);
lfs.mkdirs(new Path(dstPath,"foo"));
argv=new String[]{"-put",srcPath.toString(),dotdotDst};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Windows-only: -moveFromLocal must accept a native Windows-style
 * absolute path as the source.
 */
@Test public void testMoveFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
Path testRoot=new Path(testRootDir,"testPutFile");
lfs.delete(testRoot,true);
lfs.mkdirs(testRoot);
Path target=new Path(testRoot,"target");
Path srcFile=new Path(testRoot,new Path("srcFile"));
lfs.createNewFile(srcFile);
// Convert the URI-style path into a native absolute path for the CLI.
String winSrcFile=(new File(srcFile.toUri().getPath().toString())).getAbsolutePath();
shellRun(0,"-moveFromLocal",winSrcFile,target.toString());
// Move semantics: source is gone, target exists as a file.
assertFalse(lfs.exists(srcFile));
assertTrue(lfs.exists(target));
assertTrue(lfs.isFile(target));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the -getmerge shell command: merging a single file, a missing
 * source (must fail and leave no output file), multiple explicit sources
 * in both orders, the -nl option that appends a newline per merged file,
 * glob sources, and directory sources.
 */
@Test public void testCopyMerge() throws Exception {
  Path root = new Path(testRootDir, "TestMerge");
  Path f1 = new Path(root, "f1");
  Path f2 = new Path(root, "f2");
  Path f3 = new Path(root, "f3");
  Path fnf = new Path(root, "fnf"); // deliberately never created
  Path d = new Path(root, "dir");
  Path df1 = new Path(d, "df1");
  Path df2 = new Path(d, "df2");
  Path df3 = new Path(d, "df3");
  createFile(f1, f2, f3, df1, df2, df3);
  int exit;
  // Merge of a single file is just a copy of its contents.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1", readFile("out"));
  // A nonexistent source must fail and must not create the destination.
  exit = shell.run(new String[]{"-getmerge", fnf.toString(), "out"});
  assertEquals(1, exit);
  assertFalse(lfs.exists(new Path("out")));
  // Multiple sources are concatenated in argument order.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1f2", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", f2.toString(), f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f2f1", readFile("out"));
  // -nl appends a newline after each merged file.
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\n", readFile("out"));
  // BUG FIX: the following four runs previously discarded the return value
  // of shell.run(), so each assertEquals(0, exit) re-checked the exit code
  // of an earlier command. Capture the result of each run.
  exit = shell.run(new String[]{"-getmerge", "-nl", new Path(root, "f*").toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  // Directory sources merge the files they contain.
  exit = shell.run(new String[]{"-getmerge", "-nl", root.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", "-nl", d.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("df1\ndf2\ndf3\n", readFile("out"));
  // Mixed files and directories, merged in argument order.
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), d.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\ndf1\ndf2\ndf3\nf2\n", readFile("out"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moving a local directory to a nonexistent target renames it: the
 * source must disappear and the target must appear.
 */
@Test public void testMoveDirFromLocal() throws Exception {
  Path testRoot = new Path(testRootDir, "testPutDir");
  lfs.delete(testRoot, true);
  lfs.mkdirs(testRoot);
  Path srcDir = new Path(testRoot, "srcDir");
  lfs.mkdirs(srcDir);
  Path targetDir = new Path(testRoot, "target");
  String[] argv = {"-moveFromLocal", srcDir.toString(), targetDir.toString()};
  assertEquals(0, shell.run(argv));
  assertFalse(lfs.exists(srcDir));
  assertTrue(lfs.exists(targetDir));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moving a local file to a nonexistent target relocates it: the source
 * must disappear and the target must exist as a file.
 */
@Test public void testMoveFileFromLocal() throws Exception {
  Path testRoot = new Path(testRootDir, "testPutFile");
  lfs.delete(testRoot, true);
  lfs.mkdirs(testRoot);
  Path target = new Path(testRoot, "target");
  Path srcFile = new Path(testRoot, new Path("srcFile"));
  lfs.createNewFile(srcFile);
  String[] argv = {"-moveFromLocal", srcFile.toString(), target.toString()};
  assertEquals(0, shell.run(argv));
  assertFalse(lfs.exists(srcFile));
  assertTrue(lfs.exists(target));
  assertTrue(lfs.isFile(target));
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Creates a fresh checksummed source file before each test and removes
 * any leftover source/destination from a previous run.
 */
@Before public void prepFiles() throws Exception {
lfs.setVerifyChecksum(true);
lfs.setWriteChecksum(true);
lfs.delete(srcPath,true);
lfs.delete(dstPath,true);
FSDataOutputStream out=lfs.create(srcPath);
out.writeChars("hi");
out.close();
// Writing with checksums enabled must have produced a checksum sidecar.
assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * -moveFromLocal into an existing directory moves the source beneath it;
 * repeating the move onto the now-occupied name must fail (exit 1) and
 * leave the source untouched.
 */
@Test public void testMoveDirFromLocalDestExists() throws Exception {
Path testRoot=new Path(testRootDir,"testPutDir");
lfs.delete(testRoot,true);
lfs.mkdirs(testRoot);
Path srcDir=new Path(testRoot,"srcDir");
lfs.mkdirs(srcDir);
Path targetDir=new Path(testRoot,"target");
lfs.mkdirs(targetDir);
int exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()});
assertEquals(0,exit);
assertFalse(lfs.exists(srcDir));
assertTrue(lfs.exists(new Path(targetDir,srcDir.getName())));
// Second attempt collides with target/srcDir: must fail and not move.
lfs.mkdirs(srcDir);
exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()});
assertEquals(1,exit);
assertTrue(lfs.exists(srcDir));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * "rm -f" on a glob matching nothing must succeed (exit 0) and print
 * nothing to stderr.
 */
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
// Capture stderr so the "no output" requirement can be asserted.
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream err=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(err);
try {
int exit=shell.run(new String[]{"-rm","-f","nomatch*"});
assertEquals(0,exit);
assertTrue(bytes.toString().isEmpty());
}
finally {
// Always restore the real stderr for subsequent tests.
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test Chown 1. Create and write file on FS 2. Verify that exit code for
 * Chown on existing file is 0 3. Verify that exit code for Chown on
 * non-existing file is 1 4. Verify that exit code for Chown with glob input
 * on non-existing file is 1 5. Verify that exit code for Chown with glob
 * input on existing file in 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChown() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChown/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChown/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChown/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChown/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChown/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChown/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChown/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// change(...) appears to be change(expectedExit, owner, group, files...)
// — helper defined elsewhere in this class; confirm against its definition.
change(0,"admin",null,f1);
change(1,"admin",null,f2);
change(1,"admin",null,f3);
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// Glob matching the three files just created must succeed.
change(0,"admin",null,f7);
// Owner together with a group, and with an empty group string.
change(0,"admin","Test",f1);
change(0,"admin","",f1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A -get with a nonexistent source must print a proper "No such file or
 * directory" message rather than "null" on stderr.
 */
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
// Redirect stderr so the shell's error message can be inspected.
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(out);
final String results;
try {
Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy");
fileSys.delete(tdir,true);
fileSys.mkdirs(tdir);
String[] args=new String[3];
args[0]="-get";
args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString();
args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString();
// Precondition: neither path exists.
assertTrue("file exists",!fileSys.exists(new Path(args[1])));
assertTrue("file exists",!fileSys.exists(new Path(args[2])));
int run=shell.run(args);
results=bytes.toString();
assertEquals("Return code should be 1",1,run);
assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory"));
}
finally {
// Always restore the real stderr for subsequent tests.
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
InternalCallVerifier BooleanVerifier
/**
 * With an unusable default FS URI configured, a -ls with an explicit
 * "file:///" argument must still succeed (exit 0).
 */
@Test(timeout=30000) public void testInvalidDefaultFS() throws Exception {
FsShell shell=new FsShell();
Configuration conf=new Configuration();
// Deliberately bogus scheme for the default file system.
conf.set(FS_DEFAULT_NAME_KEY,"hhhh://doesnotexist/");
shell.setConf(conf);
String[] args=new String[2];
args[0]="-ls";
args[1]="file:///";
int res=shell.run(args);
System.out.println("res =" + res);
shell.setConf(conf);
// Capture stderr for the second, asserted run.
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(out);
final String results;
try {
int run=shell.run(args);
results=bytes.toString();
LOG.info("result=" + results);
assertTrue("Return code should be 0",run == 0);
}
finally {
// Always restore the real stderr for subsequent tests.
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * "rm" without -f on a glob matching nothing must fail (exit 1) and
 * report "No such file or directory" on stderr.
 */
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
// Capture stderr so the error message can be asserted.
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream err=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(err);
final String results;
try {
int exit=shell.run(new String[]{"-rm","nomatch*"});
assertEquals(1,exit);
results=bytes.toString();
assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
}
finally {
// Always restore the real stderr for subsequent tests.
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test Chmod 1. Create and write file on FS 2. Verify that exit code for
 * chmod on existing file is 0 3. Verify that exit code for chmod on
 * non-existing file is 1 4. Verify that exit code for chmod with glob input
 * on non-existing file is 1 5. Verify that exit code for chmod with glob
 * input on existing file in 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChmod() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// chmod on an existing file succeeds.
String argv[]={"-chmod","777",f1};
assertEquals(0,fsShell.run(argv));
// chmod on a missing file fails.
String argv2[]={"-chmod","777",f2};
assertEquals(1,fsShell.run(argv2));
// chmod on a glob that matches nothing fails.
String argv3[]={"-chmod","777",f3};
assertEquals(1,fsShell.run(argv3));
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// chmod on a glob matching the three files just created succeeds.
String argv4[]={"-chmod","777",f7};
assertEquals(0,fsShell.run(argv4));
}
InternalCallVerifier BooleanVerifier
/**
 * Test Chgrp 1. Create and write file on FS 2. Verify that exit code for
 * chgrp on existing file is 0 3. Verify that exit code for chgrp on
 * non-existing file is 1 4. Verify that exit code for chgrp with glob input
 * on non-existing file is 1 5. Verify that exit code for chgrp with glob
 * input on existing file in 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChgrp() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChgrp/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChgrp/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChgrp/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChgrp/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChgrp/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChgrp/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChgrp/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// change(...) appears to be change(expectedExit, owner, group, files...)
// — helper defined elsewhere in this class; confirm against its definition.
change(0,null,"admin",f1);
change(1,null,"admin",f2);
// Mixed lists containing a bad path must still report failure.
change(1,null,"admin",f2,f1);
change(1,null,"admin",f3);
change(1,null,"admin",f3,f1);
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// Glob matching the three files just created must succeed.
change(0,null,"admin",f7);
change(1,null,"admin",f2,f7);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an interrupted command stops processing further paths
 * and exits with code 130 (the conventional 128 + SIGINT).
 */
@Test(timeout=30000) public void testInterrupt() throws Exception {
MyFsShell shell=new MyFsShell();
shell.setConf(new Configuration());
final Path d=new Path(TEST_ROOT_DIR,"testInterrupt");
final Path f1=new Path(d,"f1");
final Path f2=new Path(d,"f2");
assertTrue(fileSys.mkdirs(d));
writeFile(fileSys,f1);
assertTrue(fileSys.isFile(f1));
writeFile(fileSys,f2);
assertTrue(fileSys.isFile(f2));
// Two explicit args: exactly one path must be processed before the
// interrupt; exit code must be 130.
int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()});
assertEquals(1,InterruptCommand.processed);
assertEquals(130,exitCode);
// Directory arg: one additional path processed (cumulative count 2).
exitCode=shell.run(new String[]{"-testInterrupt",d.toString()});
assertEquals(2,InterruptCommand.processed);
assertEquals(130,exitCode);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests curly-brace alternation in file glob patterns: simple and nested
 * alternatives, alternatives spanning path separators, literal "}" handling,
 * empty alternatives, and rejection of an unbalanced "{" pattern.
 *
 * Fix: all assertEquals calls had (actual, expected) swapped; JUnit's
 * contract is assertEquals(expected, actual), and the swapped form
 * produces misleading failure messages.
 */
@Test public void pTestCurlyBracket() throws IOException {
  Path[] matchedPath;
  String[] files;
  try {
    // Simple alternation combined with "??" wildcards.
    files = new String[]{USER_DIR + "/a.abcxx", USER_DIR + "/a.abxy", USER_DIR + "/a.hlp", USER_DIR + "/a.jhyy"};
    matchedPath = prepareTesting(USER_DIR + "/a.{abc,jh}??", files);
    assertEquals(2, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[3], matchedPath[1]);
  } finally {
    cleanupDFS();
  }
  try {
    // Nested alternation: {ab{c,d},jh}.
    files = new String[]{USER_DIR + "/a.abcxx", USER_DIR + "/a.abdxy", USER_DIR + "/a.hlp", USER_DIR + "/a.jhyy"};
    matchedPath = prepareTesting(USER_DIR + "/a.{ab{c,d},jh}??", files);
    assertEquals(3, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[1], matchedPath[1]);
    assertEquals(path[3], matchedPath[2]);
  } finally {
    cleanupDFS();
  }
  try {
    // Alternatives that span a path separator.
    files = new String[]{USER_DIR + "/a/b", USER_DIR + "/a/d", USER_DIR + "/c/b", USER_DIR + "/c/d"};
    matchedPath = prepareTesting(USER_DIR + "/{a/b,c/d}", files);
    assertEquals(2, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[3], matchedPath[1]);
  } finally {
    cleanupDFS();
  }
  try {
    // Same, with absolute alternatives.
    files = new String[]{"/a/b", "/a/d", "/c/b", "/c/d"};
    matchedPath = prepareTesting("{/a/b,/c/d}", files);
    assertEquals(2, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[3], matchedPath[1]);
  } finally {
    cleanupDFS();
  }
  try {
    // Literal '}' before an alternation group.
    files = new String[]{USER_DIR + "/}bc", USER_DIR + "/}c"};
    matchedPath = prepareTesting(USER_DIR + "/}{a,b}c", files);
    assertEquals(1, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    // Single-alternative group {b}.
    matchedPath = prepareTesting(USER_DIR + "/}{b}c", files);
    assertEquals(1, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    // Empty group {}.
    matchedPath = prepareTesting(USER_DIR + "/}{}bc", files);
    assertEquals(1, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    // Group of two empty alternatives {,}.
    matchedPath = prepareTesting(USER_DIR + "/}{,}bc", files);
    assertEquals(1, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    // One empty alternative: {b,} and {,b} match both files.
    matchedPath = prepareTesting(USER_DIR + "/}{b,}c", files);
    assertEquals(2, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[1], matchedPath[1]);
    matchedPath = prepareTesting(USER_DIR + "/}{,b}c", files);
    assertEquals(2, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[1], matchedPath[1]);
    // Alternation combined with the '?' wildcard.
    matchedPath = prepareTesting(USER_DIR + "/}{ac,?}", files);
    assertEquals(1, matchedPath.length);
    assertEquals(path[1], matchedPath[0]);
    // An unbalanced '{' must be rejected with an IOException.
    boolean hasException = false;
    try {
      prepareTesting(USER_DIR + "}{bc", files);
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Illegal file pattern:"));
      hasException = true;
    }
    assertTrue(hasException);
  } finally {
    cleanupDFS();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test public void testEditsLogRename() throws Exception {
DistributedFileSystem fs=cluster.getFileSystem();
Path src1=getTestRootPath(fc,"testEditsLogRename/srcdir/src1");
Path dst1=getTestRootPath(fc,"testEditsLogRename/dstdir/dst1");
createFile(src1);
fs.mkdirs(dst1.getParent());
createFile(dst1);
// Tight quota on the destination dir; delete dst1 to free a slot so the
// overwriting rename can succeed.
fs.setQuota(dst1.getParent(),2,HdfsConstants.QUOTA_DONT_SET);
fs.delete(dst1,true);
rename(src1,dst1,true,true,false,Rename.OVERWRITE);
// Restart so the namenode must replay these operations from the edits log.
restartCluster();
fs=cluster.getFileSystem();
src1=getTestRootPath(fc,"testEditsLogRename/srcdir/src1");
dst1=getTestRootPath(fc,"testEditsLogRename/dstdir/dst1");
// After replay, the rename outcome must still hold.
Assert.assertFalse(fs.exists(src1));
Assert.assertTrue(fs.exists(dst1));
}
InternalCallVerifier BooleanVerifier
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test public void testEditsLogOldRename() throws Exception {
DistributedFileSystem fs=cluster.getFileSystem();
Path src1=getTestRootPath(fc,"testEditsLogOldRename/srcdir/src1");
Path dst1=getTestRootPath(fc,"testEditsLogOldRename/dstdir/dst1");
createFile(src1);
fs.mkdirs(dst1.getParent());
createFile(dst1);
// Tight quota on the destination dir; delete dst1 to free a slot so the
// legacy rename can succeed.
fs.setQuota(dst1.getParent(),2,HdfsConstants.QUOTA_DONT_SET);
fs.delete(dst1,true);
oldRename(src1,dst1,true,false);
// Restart so the namenode must replay these operations from the edits log.
restartCluster();
fs=cluster.getFileSystem();
src1=getTestRootPath(fc,"testEditsLogOldRename/srcdir/src1");
dst1=getTestRootPath(fc,"testEditsLogOldRename/dstdir/dst1");
// After replay, the rename outcome must still hold.
Assert.assertFalse(fs.exists(src1));
Assert.assertTrue(fs.exists(dst1));
}
BooleanVerifier
/**
 * Names containing "..", ".", or ":" path components must be rejected
 * by the default file system's isValidName check.
 */
@Test public void testIsValidNameInvalidNames(){
  final String[] badNames = {"/foo/../bar", "/foo/./bar", "/foo/:/bar", "/foo:bar"};
  for (final String name : badNames) {
    Assert.assertFalse(name + " is not valid", fc.getDefaultFileSystem().isValidName(name));
  }
}
BooleanVerifier
/**
 * Reflectively checks HarFileSystem's overrides of FileSystem: every
 * overridable FileSystem method must be declared by HarFileSystem unless
 * it appears in MustNotImplement, in which case it must NOT be declared.
 */
@Test public void testInheritedMethodsImplemented() throws Exception {
int errors=0;
for ( Method m : FileSystem.class.getDeclaredMethods()) {
// Static, private and final methods cannot (or need not) be overridden.
if (Modifier.isStatic(m.getModifiers()) || Modifier.isPrivate(m.getModifiers()) || Modifier.isFinal(m.getModifiers())) {
continue;
}
try {
// Listed in MustNotImplement: HarFileSystem must not declare it.
MustNotImplement.class.getMethod(m.getName(),m.getParameterTypes());
try {
HarFileSystem.class.getDeclaredMethod(m.getName(),m.getParameterTypes());
LOG.error("HarFileSystem MUST not implement " + m);
errors++;
}
catch ( NoSuchMethodException ex) {
// Expected: the forbidden method is not declared.
}
}
catch ( NoSuchMethodException exc) {
// Not listed: HarFileSystem must declare its own override.
try {
HarFileSystem.class.getDeclaredMethod(m.getName(),m.getParameterTypes());
}
catch ( NoSuchMethodException exc2) {
LOG.error("HarFileSystem MUST implement " + m);
errors++;
}
}
}
assertTrue((errors + " methods were not overridden correctly - see log"),errors <= 0);
}
InternalCallVerifier BooleanVerifier
/**
 * A path qualified by HarFileSystem must retain the full
 * "har://file-localhost/" scheme-and-authority prefix (authority
 * followed by '/', not ':').
 */
@Test public void testPositiveListFilesNotEndInColon() throws Exception {
final URI uri=new URI("har://file-localhost" + harPath.toString());
harFileSystem.initialize(uri,conf);
Path p1=new Path("har://file-localhost" + harPath.toString());
Path p2=harFileSystem.makeQualified(p1);
assertTrue(p2.toUri().toString().startsWith("har://file-localhost/"));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Lists "dir1" inside the bundled test.har archive and verifies exactly
 * the expected file names (1.txt, 2.txt) are returned, each once.
 */
@Test public void testListLocatedStatus() throws Exception {
  String testHarPath = this.getClass().getResource("/test.har").getPath();
  URI uri = new URI("har://" + testHarPath);
  HarFileSystem hfs = new HarFileSystem(localFileSystem);
  hfs.initialize(uri, new Configuration());
  // Typed collections restore the stripped generics: the raw
  // RemoteIterator's next() would return Object, breaking getPath().
  Set<String> expectedFileNames = new HashSet<String>();
  expectedFileNames.add("1.txt");
  expectedFileNames.add("2.txt");
  Path path = new Path("dir1");
  RemoteIterator<LocatedFileStatus> fileList = hfs.listLocatedStatus(path);
  while (fileList.hasNext()) {
    String fileName = fileList.next().getPath().getName();
    assertTrue(fileName + " not in expected files list", expectedFileNames.contains(fileName));
    // Remove so a duplicated listing of the same name would fail above.
    expectedFileNames.remove(fileName);
  }
  assertEquals("Didn't find all of the expected file names: " + expectedFileNames, 0, expectedFileNames.size());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Initializing a HarFileSystem over a master index carrying an
 * unsupported version number must fail with an IOException.
 */
@Test public void testNegativeInitWithAnUnsupportedVersion() throws Exception {
// NOTE(review): presumably ensures the rewritten master index gets a
// strictly newer modification time than the cached one — confirm.
Thread.sleep(1000);
writeVersionToMasterIndexImpl(7777,new Path(harPath,"_masterindex"));
final HarFileSystem hfs=new HarFileSystem(localFileSystem);
// A fresh, uninitialized instance must not share the cached metadata.
assertFalse(hfs.getMetadata() == harFileSystem.getMetadata());
final URI uri=new URI("har://" + harPath.toString());
try {
hfs.initialize(uri,new Configuration());
Assert.fail("IOException expected.");
}
catch ( IOException ioe) {
// Expected: version 7777 is not a supported har version.
}
}
InternalCallVerifier BooleanVerifier
/**
 * A second HarFileSystem initialized over the same underlying FS and
 * URI must share the same cached metadata instance.
 */
@Test public void testPositiveNewHarFsOnTheSameUnderlyingFs() throws Exception {
  final URI uri = new URI("har://" + harPath.toString());
  final HarFileSystem hfs = new HarFileSystem(localFileSystem);
  hfs.initialize(uri, new Configuration());
  // Identity comparison on purpose: metadata must be the cached object.
  assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * makeQualified() must preserve the userinfo and port components of a
 * har URI unchanged.
 */
@Test public void testMakeQualifiedPath() throws Exception {
  String harPathWithUserinfo = "har://file-user:passwd@localhost:80" + harPath.toUri().getPath().toString();
  Path path = new Path(harPathWithUserinfo);
  Path qualifiedPath = path.getFileSystem(conf).makeQualified(path);
  String message = String.format("The qualified path (%s) did not match the expected path (%s).", qualifiedPath.toString(), harPathWithUserinfo);
  assertTrue(message, qualifiedPath.toString().equals(harPathWithUserinfo));
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies the LRU metadata cache: a re-initialized HarFileSystem first
 * shares the cached metadata, but after creating more archives than the
 * cache holds, the original entry has been evicted.
 */
@Test public void testPositiveLruMetadataCacheFs() throws Exception {
HarFileSystem hfs=new HarFileSystem(localFileSystem);
URI uri=new URI("har://" + harPath.toString());
hfs.initialize(uri,new Configuration());
// Cache hit: same metadata instance as the already-initialized fs.
assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
// Create one archive more than the default cache capacity.
for (int i=0; i <= hfs.METADATA_CACHE_ENTRIES_DEFAULT; i++) {
Path p=new Path(rootPath,"path1/path2/my" + i + ".har");
createHarFileSystem(conf,p);
}
hfs=new HarFileSystem(localFileSystem);
uri=new URI("har://" + harPath.toString());
hfs.initialize(uri,new Configuration());
// The original entry must have been evicted, so metadata differs now.
assertTrue(hfs.getMetadata() != harFileSystem.getMetadata());
}
TestInitializer BooleanVerifier HybridVerifier
/**
 * Initialize clean environment for start of each test
 */
@Before public void setupDirs() throws IOException {
// Fail fast if leftovers from a previous run are present.
assertFalse(src.exists());
assertFalse(tgt_one.exists());
assertFalse(tgt_mult.exists());
src.mkdirs();
tgt_one.mkdirs();
tgt_mult.mkdirs();
// Seed the source directory with three non-empty files.
makeNonEmptyFile(x1,str1);
makeNonEmptyFile(x2,str2);
makeNonEmptyFile(x3,str3);
validateSetup();
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks the hard-coded Windows hard-link command templates: array
 * lengths and the "%f" / "\\%f" placeholder values.
 */
@Test public void testWindowsSyntax(){
// Local subclass, presumably to reach protected members of
// HardLinkCGWin — confirm against HardLinkCGWin's declarations.
class win extends HardLinkCGWin {
}
;
assertEquals(5,win.hardLinkCommand.length);
assertEquals(7,win.hardLinkMultPrefix.length);
assertEquals(7,win.hardLinkMultSuffix.length);
assertEquals(4,win.getLinkCountCommand.length);
assertTrue(win.hardLinkMultPrefix[4].equals("%f"));
// Length assertions document the expected literal placeholder sizes.
assertEquals(2,("%f").length());
assertTrue(win.hardLinkMultDir.equals("\\%f"));
assertEquals(3,("\\%f").length());
assertTrue(win.getLinkCountCommand[1].equals("hardlink"));
assertEquals(4,("-c%h").length());
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the single-file method HardLink.createHardLink().
 * Also tests getLinkCount() with values greater than one.
 */
@Test public void testCreateHardLink() throws IOException {
createHardLink(x1,x1_one);
assertTrue(x1_one.exists());
// Both names now refer to the same file, so each reports 2 links.
assertEquals(2,getLinkCount(x1));
assertEquals(2,getLinkCount(x1_one));
// Untouched file keeps a link count of 1.
assertEquals(1,getLinkCount(x2));
createHardLink(x2,y_one);
createHardLink(x3,x3_one);
assertEquals(2,getLinkCount(x2));
assertEquals(2,getLinkCount(x3));
// A third name for x1 raises its count to 3 under every name.
createHardLink(x1,x11_one);
assertEquals(3,getLinkCount(x1));
assertEquals(3,getLinkCount(x1_one));
assertEquals(3,getLinkCount(x11_one));
validateTgtOne();
// Appending through one link must be visible through all of them.
appendToFile(x1_one,str3);
assertTrue(fetchFileContents(x1_one).equals(str1 + str3));
assertTrue(fetchFileContents(x11_one).equals(str1 + str3));
assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests createHardLinkMult(): hard-links every file in src into
 * tgt_mult in one call, then verifies link counts and shared content.
 */
@Test public void testCreateHardLinkMult() throws IOException {
String[] fileNames=src.list();
createHardLinkMult(src,fileNames,tgt_mult);
// Each source file gained exactly one extra link.
assertEquals(2,getLinkCount(x1));
assertEquals(2,getLinkCount(x2));
assertEquals(2,getLinkCount(x3));
assertEquals(2,getLinkCount(x1_mult));
assertEquals(2,getLinkCount(x2_mult));
assertEquals(2,getLinkCount(x3_mult));
validateTgtMult();
// Appending via the link must be visible via the original name too.
appendToFile(x1_mult,str3);
assertTrue(fetchFileContents(x1_mult).equals(str1 + str3));
assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test createHardLinkMult(), again, this time with the "too long list"
 * case where the total size of the command line arguments exceed the
 * allowed maximum. In this case, the list should be automatically
 * broken up into chunks, each chunk no larger than the max allowed.
 * We use an extended version of the method call, specifying the
 * size limit explicitly, to simulate the "too long" list with a
 * relatively short list.
 */
@Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException {
// Rename the sources to long, equal-length names so a small byte budget
// only admits a couple of arguments per command.
String name1="x11111111";
String name2="x22222222";
String name3="x33333333";
File x1_long=new File(src,name1);
File x2_long=new File(src,name2);
File x3_long=new File(src,name3);
x1.renameTo(x1_long);
x2.renameTo(x2_long);
x3.renameTo(x3_long);
assertTrue(x1_long.exists());
assertTrue(x2_long.exists());
assertTrue(x3_long.exists());
assertFalse(x1.exists());
assertFalse(x2.exists());
assertFalse(x3.exists());
int callCount;
String[] emptyList={};
String[] fileNames=src.list();
// Fixed overhead of a linking command with no file arguments.
int overhead=getLinkMultArgLength(src,emptyList,tgt_mult);
// Budget for roughly 2.5 filenames per command: 3 files need 2 chunks.
int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(2,callCount);
String[] tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
// Reset the target directory for the second scenario.
FileUtil.fullyDelete(tgt_mult);
assertFalse(tgt_mult.exists());
tgt_mult.mkdirs();
assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0);
// Budget for under one filename beyond overhead: one file per command,
// hence 3 calls.
maxLength=overhead + (int)(0.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(3,callCount);
tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a file: listFiles must return exactly one
 * LocatedFileStatus for it, whether or not recursion is requested.
 */
@Test public void testFile() throws IOException {
  fs.mkdirs(TEST_DIR);
  writeFile(fs, FILE1, FILE_LEN);
  // Typed iterator restores the stripped generics: the raw type's next()
  // would return Object and not assign to LocatedFileStatus.
  RemoteIterator<LocatedFileStatus> itor = fs.listFiles(FILE1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fs.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  // Non-recursive listing behaves identically for a plain file.
  itor = fs.listFiles(FILE1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fs.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  fs.delete(FILE1, true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a directory: empty-directory listings, a single
 * nested file, recursive vs non-recursive listing, and a multi-file tree
 * whose recursive listing may come back in any order.
 */
@Test public void testDirectory() throws IOException {
  fs.mkdirs(DIR1);
  // An empty directory yields an empty iterator, recursive or not.
  // Typed iterator restores the stripped generics: the raw type's next()
  // would return Object and not assign to LocatedFileStatus.
  RemoteIterator<LocatedFileStatus> itor = fs.listFiles(DIR1, true);
  assertFalse(itor.hasNext());
  itor = fs.listFiles(DIR1, false);
  assertFalse(itor.hasNext());
  writeFile(fs, FILE2, FILE_LEN);
  // One file under DIR1: both listing modes return exactly that file.
  itor = fs.listFiles(DIR1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fs.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  itor = fs.listFiles(DIR1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fs.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  writeFile(fs, FILE1, FILE_LEN);
  writeFile(fs, FILE3, FILE_LEN);
  // Recursive listing of TEST_DIR must return all three files, order-free.
  Set<Path> filesToFind = new HashSet<Path>();
  filesToFind.add(fs.makeQualified(FILE1));
  filesToFind.add(fs.makeQualified(FILE2));
  filesToFind.add(fs.makeQualified(FILE3));
  itor = fs.listFiles(TEST_DIR, true);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected", filesToFind.remove(stat.getPath()));
  stat = itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected", filesToFind.remove(stat.getPath()));
  stat = itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected", filesToFind.remove(stat.getPath()));
  assertFalse(itor.hasNext());
  assertTrue(filesToFind.isEmpty());
  // Non-recursive listing of TEST_DIR sees only its direct child file.
  itor = fs.listFiles(TEST_DIR, false);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fs.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
  fs.delete(TEST_DIR, true);
}
APIUtilityVerifier BooleanVerifier
/**
 * Two buffer dirs. The first dir exists & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test(timeout=30000) public void testROBufferDirAndRWBufferDir() throws Exception {
// Not applicable on Windows.
if (isWindows) return;
String dir1=buildBufferDir(ROOT,1);
String dir2=buildBufferDir(ROOT,2);
try {
conf.set(CONTEXT,dir1 + "," + dir2);
// Only dir2 is created; the buffer root then goes read-only so dir1
// can never be created.
assertTrue(localFs.mkdirs(new Path(dir2)));
BUFFER_ROOT.setReadOnly();
// Both allocations must land in the writable dir2.
validateTempDirCreation(dir2);
validateTempDirCreation(dir2);
}
finally {
// Restore write permission so cleanup can remove the dirs.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Allocates TRIALS temp files across two buffer dirs and verifies every
 * file was placed in one of the configured dirs.
 */
@Test(timeout=30000) public void testCreateManyFiles() throws Exception {
// Not applicable on Windows.
if (isWindows) return;
String dir5=buildBufferDir(ROOT,5);
String dir6=buildBufferDir(ROOT,6);
try {
conf.set(CONTEXT,dir5 + "," + dir6);
assertTrue(localFs.mkdirs(new Path(dir5)));
assertTrue(localFs.mkdirs(new Path(dir6)));
int inDir5=0, inDir6=0;
for (int i=0; i < TRIALS; ++i) {
File result=createTempFile();
// Classify each allocation by which buffer dir prefix it carries.
if (result.getPath().startsWith(new Path(dir5,FILENAME).toUri().getPath())) {
inDir5++;
}
else if (result.getPath().startsWith(new Path(dir6,FILENAME).toUri().getPath())) {
inDir6++;
}
result.delete();
}
// Every allocation must have gone to one of the two configured dirs.
assertTrue(inDir5 + inDir6 == TRIALS);
}
finally {
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String,Configuration)}
 * returns correct filenames and "file" schema, that the returned iterator
 * is exhausted after one pass, and that it rejects remove().
 * @throws IOException
 */
@Test(timeout=30000) public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
    // Typed iterable restores the stripped generics: iterating a raw
    // Iterable as Path would not compile.
    final Iterable<Path> pathIterable = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    int count = 0;
    for (final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME, p.getName());
      assertEquals("file", p.getFileSystem(conf).getUri().getScheme());
    }
    // One copy of the file per configured dir.
    assertEquals(2, count);
    // The iterator must be exhausted after iteration.
    try {
      Path p = pathIterable.iterator().next();
      assertFalse("NoSuchElementException must be thrown, but returned [" + p + "] instead.", true);
    } catch (NoSuchElementException nsee) {
      // Expected.
    }
    // remove() is unsupported on the returned iterator.
    final Iterable<Path> pathIterable2 = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    final Iterator<Path> it = pathIterable2.iterator();
    try {
      it.remove();
      assertFalse(true);
    } catch (UnsupportedOperationException uoe) {
      // Expected.
    }
  } finally {
    Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test no side effect files are left over. After creating a temp
 * temp file, remove both the temp file and its parent. Verify that
 * no files or directories are left over as can happen when File objects
 * are mistakenly created from fully qualified path strings.
 * @throws IOException
 */
@Test(timeout=30000) public void testNoSideEffects() throws IOException {
assumeTrue(!isWindows);
String dir=buildBufferDir(ROOT,0);
try {
conf.set(CONTEXT,dir);
// Size -1 presumably skips the free-space check — TODO confirm against
// LocalDirAllocator.createTmpFileForWrite.
File result=dirAllocator.createTmpFileForWrite(FILENAME,-1,conf);
// Removing the file and then its parent must leave nothing behind.
assertTrue(result.delete());
assertTrue(result.getParentFile().delete());
assertFalse(new File(dir).exists());
}
finally {
// Restore write permission so rmBufferDirs() can clean up.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test getLocalPathToRead() returns correct filename and "file" schema.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetLocalPathToRead() throws IOException {
assumeTrue(!isWindows);
String dir=buildBufferDir(ROOT,0);
try {
conf.set(CONTEXT,dir);
assertTrue(localFs.mkdirs(new Path(dir)));
// Write a file, then resolve it back for read and check name + scheme.
File f1=dirAllocator.createTmpFileForWrite(FILENAME,SMALL_FILE_SIZE,conf);
Path p1=dirAllocator.getLocalPathToRead(f1.getName(),conf);
assertEquals(f1.getName(),p1.getName());
assertEquals("file",p1.getFileSystem(conf).getUri().getScheme());
}
finally {
// Restore write permission so cleanup can remove the buffer dirs.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Two buffer dirs. Both exists and on a R/W disk.
 * Later disk1 becomes read-only.
 * @throws Exception
 */
@Test(timeout=30000) public void testRWBufferDirBecomesRO() throws Exception {
if (isWindows) return;
String dir3=buildBufferDir(ROOT,3);
String dir4=buildBufferDir(ROOT,4);
try {
conf.set(CONTEXT,dir3 + "," + dir4);
assertTrue(localFs.mkdirs(new Path(dir3)));
assertTrue(localFs.mkdirs(new Path(dir4)));
// Seed the allocator so it has a current directory index.
createTempFile(SMALL_FILE_SIZE);
// NOTE(review): maps the allocator's current index to the buffer-dir
// suffix expected for the next allocation — confirm index<->suffix mapping.
int nextDirIdx=(dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
validateTempDirCreation(buildBufferDir(ROOT,nextDirIdx));
// After dir4 goes read-only, every allocation must land in dir3.
new File(new Path(dir4).toUri().getPath()).setReadOnly();
validateTempDirCreation(dir3);
validateTempDirCreation(dir3);
}
finally {
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * getLocalPathForWrite with checkAccess set to false should create a parent
 * directory. With checkAccess true, the directory should not be created.
 * @throws IOException
 */
@Test(timeout=30000) public void testLocalPathForWriteDirCreation() throws IOException {
String dir0=buildBufferDir(ROOT,0);
String dir1=buildBufferDir(ROOT,1);
try {
conf.set(CONTEXT,dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
BUFFER_ROOT.setReadOnly();
// Default checkWrite behavior: the parent directory must be created.
Path p1=dirAllocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
// With checkWrite=false the parent directory must NOT be created,
// so looking it up has to throw FileNotFoundException.
Path p2=dirAllocator.getLocalPathForWrite("p2/x",SMALL_FILE_SIZE,conf,false);
try {
localFs.getFileStatus(p2.getParent());
// Original code silently passed when no exception was thrown.
fail("Expected FileNotFoundException for uncreated parent " + p2.getParent());
}
catch ( Exception e) {
// JUnit convention: expected value first, actual second.
assertEquals(FileNotFoundException.class,e.getClass());
}
}
finally {
// Restore write permission so cleanup can remove the buffer dirs.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
BooleanVerifier
// Verifies that a context activated via getLocalPathForWrite() can be
// removed again and is no longer reported as valid afterwards.
@Test(timeout=30000) public void testRemoveContext() throws IOException {
String bufferDir=buildBufferDir(ROOT,0);
try {
final String contextKey="application_1340842292563_0004.app.cache.dirs";
conf.set(contextKey,bufferDir);
LocalDirAllocator allocator=new LocalDirAllocator(contextKey);
allocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
assertTrue(LocalDirAllocator.isContextValid(contextKey));
LocalDirAllocator.removeContext(contextKey);
assertFalse(LocalDirAllocator.isContextValid(contextKey));
}
finally {
rmBufferDirs();
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test(timeout=30000) public void test0() throws Exception {
// Read-only permission handling differs on Windows; skip there.
if (isWindows) return;
String dir0=buildBufferDir(ROOT,0);
String dir1=buildBufferDir(ROOT,1);
try {
conf.set(CONTEXT,dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
BUFFER_ROOT.setReadOnly();
// Both allocations must fall through to the writable dir1.
validateTempDirCreation(dir1);
validateTempDirCreation(dir1);
}
finally {
// Restore write permission so cleanup can remove the buffer dirs.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
BooleanVerifier
// getLocalFSFileContext() must hand out a fresh FileContext on each call
// rather than returning the cached instance held in 'fc'.
@Test public void testFileContextNoCache() throws UnsupportedFileSystemException {
FileContext freshContext=FileContext.getLocalFSFileContext();
Assert.assertNotSame(fc,freshContext);
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier
// End-to-end check of ChecksumFileSystem.reportChecksumFailure(): both the
// corrupt data file and its .crc file must be moved aside into a "bad files"
// directory under dir1, preserving their lengths.
@Test(timeout=10000) public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1=new File(base,"dir1");
final File dir2=new File(dir1,"dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
final String dataFileName="corruptedData";
final Path dataPath=new Path(new File(dir2,dataFileName).toURI());
final Path checksumPath=fileSys.getChecksumFile(dataPath);
// Write some data so both the data file and its checksum file exist.
final FSDataOutputStream fsdos=fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
}
finally {
fsdos.close();
}
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength=fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength=fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// Make 'base' read-only; reportChecksumFailure must still succeed because
// it moves files within dir1, not base itself.
FileUtil.setWritable(base,false);
FSDataInputStream dataFsdis=fileSys.open(dataPath);
FSDataInputStream checksumFsdis=fileSys.open(checksumPath);
boolean retryIsNecessary=fileSys.reportChecksumFailure(dataPath,dataFsdis,0,checksumFsdis,0);
assertTrue(!retryIsNecessary);
// Both originals must be gone after the failure report.
assertTrue(!fileSys.pathToFile(dataPath).exists());
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// dir1 should now hold exactly one extra entry: the bad-files directory.
File[] dir1files=dir1.listFiles(new FileFilter(){
@Override public boolean accept( File pathname){
return pathname != null && !pathname.getName().equals("dir2");
}
}
);
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir=dir1files[0];
File[] badFiles=badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
// Verify both moved files are present and kept their original lengths.
boolean dataFileFound=false;
boolean checksumFileFound=false;
for ( File badFile : badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound=true;
}
else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound=true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that renaming a directory replaces the destination if the destination
 * is an existing empty directory.
 * Before:
 * /dir1
 * /file1
 * /file2
 * /dir2
 * After rename("/dir1", "/dir2"):
 * /dir2
 * /file1
 * /file2
 */
@Test public void testRenameReplaceExistingEmptyDirectory() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dir1");
Path dst=new Path(TEST_ROOT_DIR,"dir2");
// Clean up any state left behind by a previous run.
fileSys.delete(src,true);
fileSys.delete(dst,true);
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys,new Path(src,"file1"),1);
writeFile(fileSys,new Path(src,"file2"),1);
// dst exists and is empty, so the rename must replace it wholesale.
assertTrue(fileSys.mkdirs(dst));
assertTrue(fileSys.rename(src,dst));
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst,"file1")));
assertTrue(fileSys.exists(new Path(dst,"file2")));
assertFalse(fileSys.exists(src));
}
TestCleaner BooleanVerifier HybridVerifier
// Restores write permission on the test base directory (some tests make it
// read-only) and then removes it entirely.
@After public void after() throws IOException {
FileUtil.setWritable(base,true);
FileUtil.fullyDelete(base);
assertFalse(base.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * Test deleting a file, directory, and non-existent path
 */
@Test(timeout=1000) public void testBasicDelete() throws IOException {
Path dir1=new Path(TEST_ROOT_DIR,"dir1");
Path file1=new Path(TEST_ROOT_DIR,"file1");
Path file2=new Path(TEST_ROOT_DIR + "/dir1","file2");
Path file3=new Path(TEST_ROOT_DIR,"does-not-exist");
assertTrue(fileSys.mkdirs(dir1));
writeFile(fileSys,file1,1);
writeFile(fileSys,file2,1);
// Single-argument delete — presumably the recursive variant; confirm
// against the FileSystem#delete(Path) contract.
assertFalse("Returned true deleting non-existant path",fileSys.delete(file3));
assertTrue("Did not delete file",fileSys.delete(file1));
// dir1 still contains file2 here, so this exercises non-empty deletion.
assertTrue("Did not delete non-empty dir",fileSys.delete(dir1));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Creates a file of random size, checks status/summary lengths agree, and
// verifies mkdirs rejects a path through an existing file and a null arg.
@Test(timeout=1000) public void testCreateFileAndMkdirs() throws IOException {
Path test_dir=new Path(TEST_ROOT_DIR,"test_dir");
Path test_file=new Path(test_dir,"file1");
assertTrue(fileSys.mkdirs(test_dir));
// Random size in [1, 2^20] so length round-tripping is non-trivial.
final int fileSize=new Random().nextInt(1 << 20) + 1;
writeFile(fileSys,test_file,fileSize);
{
final FileStatus status=fileSys.getFileStatus(test_file);
Assert.assertEquals(fileSize,status.getLen());
final ContentSummary summary=fileSys.getContentSummary(test_dir);
Assert.assertEquals(fileSize,summary.getLength());
}
// A directory cannot be created under an existing file.
Path bad_dir=new Path(test_file,"another_dir");
try {
fileSys.mkdirs(bad_dir);
fail("Failed to detect existing file in path");
}
catch ( ParentNotDirectoryException e) {
// expected
}
try {
fileSys.mkdirs(null);
fail("Failed to detect null in mkdir arg");
}
catch ( IllegalArgumentException e) {
// expected
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests a simple rename of a directory.
 */
@Test public void testRenameDirectory() throws IOException {
Path source=new Path(TEST_ROOT_DIR,"dir1");
Path target=new Path(TEST_ROOT_DIR,"dir2");
// Start from a clean slate in case a previous run left state behind.
fileSys.delete(source,true);
fileSys.delete(target,true);
assertTrue(fileSys.mkdirs(source));
assertTrue(fileSys.rename(source,target));
assertTrue(fileSys.exists(target));
assertFalse(fileSys.exists(source));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Exercises FileUtil.copy with every (deleteSource, overwrite) combination,
// including copying a file into a directory and the overwrite-refusal case.
@Test(timeout=10000) public void testCopy() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dingo");
Path dst=new Path(TEST_ROOT_DIR,"yak");
writeFile(fileSys,src,1);
// deleteSource=true, overwrite=false: behaves like a move.
assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,false,conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
// deleteSource=false: plain copy back, both sides remain.
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf));
assertTrue(fileSys.exists(src) && fileSys.exists(dst));
// deleteSource=true, overwrite=true: move onto an existing destination.
assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,true,conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
// Copying a file into an existing directory lands it inside that dir.
fileSys.mkdirs(src);
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf));
Path tmp=new Path(src,dst.getName());
assertTrue(fileSys.exists(tmp) && fileSys.exists(dst));
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,true,conf));
assertTrue(fileSys.delete(tmp,true));
// Destination entry now exists as a directory: copy must refuse.
fileSys.mkdirs(tmp);
try {
FileUtil.copy(fileSys,dst,fileSys,src,true,true,conf);
fail("Failed to detect existing dir");
}
catch ( IOException e) {
// expected
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that renaming a directory to an existing directory that is not empty
 * results in a full copy of source to destination.
 * Before:
 * /dir1
 * /dir2
 * /dir3
 * /file1
 * /file2
 * After rename("/dir1/dir2/dir3", "/dir1"):
 * /dir1
 * /dir3
 * /file1
 * /file2
 */
@Test public void testRenameMoveToExistingNonEmptyDirectory() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dir1/dir2/dir3");
Path dst=new Path(TEST_ROOT_DIR,"dir1");
// Clean up any state left behind by a previous run.
fileSys.delete(src,true);
fileSys.delete(dst,true);
// Creating src also implicitly creates dst (dir1) as an ancestor,
// so dst is non-empty when the rename happens.
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys,new Path(src,"file1"),1);
writeFile(fileSys,new Path(src,"file2"),1);
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.rename(src,dst));
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst,"dir3")));
assertTrue(fileSys.exists(new Path(dst,"dir3/file1")));
assertTrue(fileSys.exists(new Path(dst,"dir3/file2")));
assertFalse(fileSys.exists(src));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the capability of setting the working directory.
 */
@Test(timeout=10000) public void testWorkingDirectory() throws IOException {
Path origDir=fileSys.getWorkingDirectory();
Path subdir=new Path(TEST_ROOT_DIR,"new");
try {
assertTrue(!fileSys.exists(subdir));
assertTrue(fileSys.mkdirs(subdir));
assertTrue(fileSys.isDirectory(subdir));
fileSys.setWorkingDirectory(subdir);
// All relative paths below resolve against the new working directory.
Path dir1=new Path("dir1");
assertTrue(fileSys.mkdirs(dir1));
assertTrue(fileSys.isDirectory(dir1));
fileSys.delete(dir1,true);
assertTrue(!fileSys.exists(dir1));
Path file1=new Path("file1");
Path file2=new Path("sub/file2");
// writeFile presumably returns the written contents — used for the
// read-back check at the end.
String contents=writeFile(fileSys,file1,1);
fileSys.copyFromLocalFile(file1,file2);
assertTrue(fileSys.exists(file1));
assertTrue(fileSys.isFile(file1));
cleanupFile(fileSys,file2);
fileSys.copyToLocalFile(file1,file2);
cleanupFile(fileSys,file2);
// Rename back and forth to confirm relative-path rename works.
fileSys.rename(file1,file2);
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
fileSys.rename(file2,file1);
InputStream stm=fileSys.open(file1);
byte[] buffer=new byte[3];
int bytesRead=stm.read(buffer,0,3);
assertEquals(contents,new String(buffer,0,bytesRead));
stm.close();
}
finally {
// Always restore the original working directory for later tests.
fileSys.setWorkingDirectory(origDir);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// setTimes with atime=-1 must change only the modification time and leave
// the access time untouched.
@Test(timeout=1000) public void testSetTimes() throws Exception {
Path path=new Path(TEST_ROOT_DIR,"set-times");
writeFile(fileSys,path,1);
long newModTime=12345000;
FileStatus status=fileSys.getFileStatus(path);
assertTrue("check we're actually changing something",newModTime != status.getModificationTime());
long accessTime=status.getAccessTime();
// -1 means "leave the access time unchanged".
fileSys.setTimes(path,newModTime,-1);
status=fileSys.getFileStatus(path);
assertEquals(newModTime,status.getModificationTime());
assertEquals(accessTime,status.getAccessTime());
}
BooleanVerifier
// Round-trips representative path strings through toStringTest and verifies
// that the empty string is rejected with IllegalArgumentException.
@Test(timeout=30000) public void testToString(){
String[] posixCases={"/","/foo","/foo/bar","foo","foo/bar","/foo/bar#boo","foo/bar#boo"};
for ( String pathString : posixCases) {
toStringTest(pathString);
}
boolean rejectedEmpty=false;
try {
toStringTest("");
}
catch ( IllegalArgumentException e) {
rejectedEmpty=true;
}
assertTrue(rejectedEmpty);
if (Path.WINDOWS) {
// Drive-letter forms, with and without a separator after the colon.
String[] windowsCases={"c:","c:/","c:foo","c:foo/bar","c:foo/bar","c:/foo/bar","C:/foo/bar#boo","C:foo/bar#boo"};
for ( String pathString : windowsCases) {
toStringTest(pathString);
}
}
}
BooleanVerifier
// Windows-only: drive-letter paths are absolute only when a separator
// follows the colon. The boolean arg presumably indicates the
// leading-slash form ("/C:/...") — confirm against Path.isWindowsAbsolutePath.
@Test(timeout=30000) public void testIsWindowsAbsolutePath(){
if (!Shell.WINDOWS) return;
assertTrue(Path.isWindowsAbsolutePath("C:\\test",false));
assertTrue(Path.isWindowsAbsolutePath("C:/test",false));
assertTrue(Path.isWindowsAbsolutePath("/C:/test",true));
assertFalse(Path.isWindowsAbsolutePath("/test",false));
assertFalse(Path.isWindowsAbsolutePath("/test",true));
assertFalse(Path.isWindowsAbsolutePath("C:test",false));
assertFalse(Path.isWindowsAbsolutePath("/C:test",true));
}
BooleanVerifier
// The root path must not compare equal to a non-root path.
@Test(timeout=30000) public void testEquals(){
Path root=new Path("/");
assertFalse(root.equals(new Path("/foo")));
}
BranchVerifier BooleanVerifier
// Checks isAbsolute() for rooted, relative, and (on Windows) drive paths.
@Test(timeout=30000) public void testIsAbsolute(){
assertTrue(new Path("/").isAbsolute());
assertTrue(new Path("/foo").isAbsolute());
assertFalse(new Path("foo").isAbsolute());
assertFalse(new Path("foo/bar").isAbsolute());
assertFalse(new Path(".").isAbsolute());
if (Path.WINDOWS) {
// A drive-letter path is absolute only when a slash follows the colon.
assertTrue(new Path("c:/a/b").isAbsolute());
assertFalse(new Path("c:a/b").isAbsolute());
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Round-trips URIs (including fragments) through Path construction,
// qualification, and child-path resolution.
@Test(timeout=30000) public void testURI() throws URISyntaxException, IOException {
URI uri=new URI("file:///bar#baz");
Path path=new Path(uri);
// The fragment must survive Path -> String -> URI round-tripping.
assertTrue(uri.equals(new URI(path.toString())));
FileSystem fs=path.getFileSystem(new Configuration());
assertTrue(uri.equals(new URI(fs.makeQualified(path).toString())));
URI uri2=new URI("file:///bar/baz");
assertTrue(uri2.equals(new URI(fs.makeQualified(new Path(uri2)).toString())));
// Resolving a child keeps the child's fragment, not the parent's.
assertEquals("foo://bar/baz#boo",new Path("foo://bar/",new Path(new URI("/baz#boo"))).toString());
assertEquals("foo://bar/baz/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("fud#boo"))).toString());
assertEquals("foo://bar/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("/fud#boo"))).toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Verifies glob handling when a directory is literally named "*":
// listStatus treats the name literally while globStatus expands it,
// and a backslash escape ("\*") forces the literal interpretation.
@Test(timeout=30000) public void testGlobEscapeStatus() throws Exception {
// Backslash escapes in paths are not usable on Windows; skip.
if (Shell.WINDOWS) return;
FileSystem lfs=FileSystem.getLocal(new Configuration());
Path testRoot=lfs.makeQualified(new Path(System.getProperty("test.build.data","test/build/data"),"testPathGlob"));
lfs.delete(testRoot,true);
lfs.mkdirs(testRoot);
assertTrue(lfs.isDirectory(testRoot));
lfs.setWorkingDirectory(testRoot);
// One directory literally named "*" plus two ordinary siblings.
Path paths[]=new Path[]{new Path(testRoot,"*/f"),new Path(testRoot,"d1/f"),new Path(testRoot,"d2/f")};
Arrays.sort(paths);
for ( Path p : paths) {
lfs.create(p).close();
assertTrue(lfs.exists(p));
}
// listStatus takes the name literally: it lists the "*" directory.
FileStatus stats[]=lfs.listStatus(new Path(testRoot,"*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
// globStatus expands "*" to all three directories.
stats=lfs.globStatus(new Path(testRoot,"*"));
Arrays.sort(stats);
Path parentPaths[]=new Path[paths.length];
for (int i=0; i < paths.length; i++) {
parentPaths[i]=paths[i].getParent();
}
assertEquals(mergeStatuses(parentPaths),mergeStatuses(stats));
// Escaped "\*" matches only the directory literally named "*".
stats=lfs.globStatus(new Path(testRoot,"\\*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*"),stats[0].getPath());
stats=lfs.globStatus(new Path(testRoot,"*/f"));
assertEquals(paths.length,stats.length);
assertEquals(mergeStatuses(paths),mergeStatuses(stats));
stats=lfs.globStatus(new Path(testRoot,"\\*/f"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
stats=lfs.globStatus(new Path(testRoot,"\\*/*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
// Uses the native Stat helper on a directory and on a symlink to that
// directory; with dereference=false the symlink must not report isDirectory.
@Test(timeout=10000) public void testStat() throws Exception {
// The stat(1)-based implementation is platform-dependent; skip if absent.
Assume.assumeTrue(Stat.isAvailable());
FileSystem fs=FileSystem.getLocal(new Configuration());
Path testDir=new Path(getTestRootPath(fs),"teststat");
fs.mkdirs(testDir);
Path sub1=new Path(testDir,"sub1");
Path sub2=new Path(testDir,"sub2");
fs.mkdirs(sub1);
fs.createSymlink(sub1,sub2,false);
// Third arg presumably toggles link dereferencing — confirm against Stat.
FileStatus stat1=new Stat(sub1,4096l,false,fs).getFileStatus();
FileStatus stat2=new Stat(sub2,0,false,fs).getFileStatus();
assertTrue(stat1.isDirectory());
assertFalse(stat2.isDirectory());
fs.delete(testDir,true);
}
APIUtilityVerifier BooleanVerifier
// recoverLease() invoked on a symlink must resolve the link and report the
// underlying closed file as recovered.
@Test(timeout=10000) public void testRecoverLease() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path target=new Path(testBaseDir1(),"file");
Path symlink=new Path(testBaseDir1(),"link");
wrapper.setWorkingDirectory(baseDir);
createAndWriteFile(target);
wrapper.createSymlink(target,symlink,false);
boolean recovered=dfs.recoverLease(symlink);
assertTrue("Expected recoverLease to return true",recovered);
}
APIUtilityVerifier BooleanVerifier
// isFileClosed() invoked on a symlink must resolve the link and report the
// underlying (closed) file's state.
@Test(timeout=10000) public void testIsFileClosed() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path target=new Path(testBaseDir1(),"file");
Path symlink=new Path(testBaseDir1(),"link");
wrapper.setWorkingDirectory(baseDir);
createAndWriteFile(target);
wrapper.createSymlink(target,symlink,false);
boolean fileClosed=dfs.isFileClosed(symlink);
assertTrue("Expected isFileClosed to return true",fileClosed);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
// A dangling symlink (target missing) must fail getFileStatus and reads,
// but getFileLinkStatus must still describe the link itself; once the
// target is created the same link resolves normally.
@Test(timeout=1000) public void testDanglingLink() throws IOException {
assumeTrue(!Path.WINDOWS);
Path fileAbs=new Path(testBaseDir1() + "/file");
Path fileQual=new Path(testURI().toString(),fileAbs);
Path link=new Path(testBaseDir1() + "/linkToFile");
Path linkQual=new Path(testURI().toString(),link.toString());
wrapper.createSymlink(fileAbs,link,false);
// Remove and recreate the link to ensure we are testing a fresh one.
FileUtil.fullyDelete(new File(link.toUri().getPath()));
wrapper.createSymlink(fileAbs,link,false);
try {
wrapper.getFileStatus(link);
fail("Got FileStatus for dangling link");
}
catch ( FileNotFoundException f) {
// expected: the link target does not exist
}
// getFileLinkStatus must succeed and describe the link itself.
UserGroupInformation user=UserGroupInformation.getCurrentUser();
FileStatus fsd=wrapper.getFileLinkStatus(link);
assertEquals(fileQual,fsd.getSymlink());
assertTrue(fsd.isSymlink());
assertFalse(fsd.isDirectory());
assertEquals(user.getUserName(),fsd.getOwner());
assertEquals(user.getGroupNames()[0],fsd.getGroup());
assertEquals(linkQual,fsd.getPath());
try {
readFile(link);
// Fixed copy-pasted message: this branch is about reading, not status.
fail("Was able to read a dangling link");
}
catch ( FileNotFoundException f) {
// expected
}
// Once the target exists the link must resolve without throwing.
createAndWriteFile(fileAbs);
wrapper.getFileStatus(link);
}
UtilityVerifier BooleanVerifier HybridVerifier
// Renaming a symlink onto itself must fail, both without and with the
// OVERWRITE option.
@Override @Test(timeout=10000) public void testRenameSymlinkToItself() throws IOException {
Path file=new Path(testBaseDir1(),"file");
createAndWriteFile(file);
Path link=new Path(testBaseDir1(),"linkToFile1");
wrapper.createSymlink(file,link,false);
try {
wrapper.rename(link,link);
fail("Failed to get expected IOException");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
try {
wrapper.rename(link,link,Rename.OVERWRITE);
fail("Failed to get expected IOException");
}
catch ( IOException e) {
// Implementations may report either "already exists" or "not found".
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException || unwrapException(e) instanceof FileNotFoundException);
}
}
BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// equals() contract for XAttr fixtures: the test constants are distinct
// instances, equal ones compare equal, different ones do not.
@Test public void testXAttrEquals(){
// Guard: the fixtures must be distinct objects, or the equals checks
// below would be vacuous.
assertNotSame(XATTR1,XATTR2);
assertNotSame(XATTR2,XATTR3);
assertNotSame(XATTR3,XATTR4);
assertNotSame(XATTR4,XATTR5);
assertEquals(XATTR,XATTR1);
assertEquals(XATTR1,XATTR1);
assertEquals(XATTR2,XATTR2);
assertEquals(XATTR3,XATTR3);
assertEquals(XATTR4,XATTR4);
assertEquals(XATTR5,XATTR5);
assertFalse(XATTR1.equals(XATTR2));
assertFalse(XATTR2.equals(XATTR3));
assertFalse(XATTR3.equals(XATTR4));
assertFalse(XATTR4.equals(XATTR5));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// hashCode() must agree with equals(): equal XAttrs share a hash, and the
// distinct fixtures happen to hash differently.
@Test public void testXAttrHashCode(){
assertEquals(XATTR.hashCode(),XATTR1.hashCode());
assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// setOwner() on a directory must update the owner that getFileStatus
// subsequently reports, while the entry remains a directory.
@Test public void testSetOwnerOnFolder() throws Exception {
Path folder=new Path("testOwner");
assertTrue(fs.mkdirs(folder));
fs.setOwner(folder,"newUser",null);
FileStatus updated=fs.getFileStatus(folder);
assertNotNull(updated);
assertEquals("newUser",updated.getOwner());
assertTrue(updated.isDirectory());
}
InternalCallVerifier BooleanVerifier
// Creating, renaming, and deleting children must each advance the parent
// folder's modification time. testModifiedTime(p,t) presumably returns
// whether the folder's mtime still matches t — confirm against the helper.
@Test public void testFolderLastModifiedTime() throws Exception {
Path parentFolder=new Path("testFolder");
Path innerFile=new Path(parentFolder,"innerfile");
assertTrue(fs.mkdirs(parentFolder));
long lastModifiedTime=fs.getFileStatus(parentFolder).getModificationTime();
// Sleep past the error margin so a changed mtime is distinguishable.
Thread.sleep(modifiedTimeErrorMargin + 1);
assertTrue(fs.createNewFile(innerFile));
assertFalse(testModifiedTime(parentFolder,lastModifiedTime));
testModifiedTime(parentFolder);
lastModifiedTime=fs.getFileStatus(parentFolder).getModificationTime();
Path destFolder=new Path("testDestFolder");
assertTrue(fs.mkdirs(destFolder));
long destLastModifiedTime=fs.getFileStatus(destFolder).getModificationTime();
Thread.sleep(modifiedTimeErrorMargin + 1);
Path destFile=new Path(destFolder,"innerfile");
// A rename must touch both the source and the destination folders.
assertTrue(fs.rename(innerFile,destFile));
assertFalse(testModifiedTime(parentFolder,lastModifiedTime));
assertFalse(testModifiedTime(destFolder,destLastModifiedTime));
testModifiedTime(parentFolder);
testModifiedTime(destFolder);
destLastModifiedTime=fs.getFileStatus(destFolder).getModificationTime();
Thread.sleep(modifiedTimeErrorMargin + 1);
// Deleting a child must also touch its parent folder.
fs.delete(destFile,false);
assertFalse(testModifiedTime(destFolder,destLastModifiedTime));
testModifiedTime(destFolder);
}
BooleanVerifier EqualityVerifier HybridVerifier
// Opening a directory as a file must throw FileNotFoundException with the
// exact directory-not-a-file message.
@Test public void testReadingDirectoryAsFile() throws Exception {
Path dir=new Path("/x");
assertTrue(fs.mkdirs(dir));
try {
fs.open(dir).close();
// fail() is the idiomatic form of the old assertTrue(msg,false).
fail("Should've thrown");
}
catch ( FileNotFoundException ex) {
assertEquals("/x is a directory not a file.",ex.getMessage());
}
}
BooleanVerifier EqualityVerifier HybridVerifier
// Creating a file where a directory already exists must throw IOException
// with the exact already-exists-as-directory message.
@Test public void testCreatingFileOverDirectory() throws Exception {
Path dir=new Path("/x");
assertTrue(fs.mkdirs(dir));
try {
fs.create(dir).close();
// fail() is the idiomatic form of the old assertTrue(msg,false).
fail("Should've thrown");
}
catch ( IOException ex) {
assertEquals("Cannot create file /x; already exists as a directory.",ex.getMessage());
}
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// A trailing "/." component must resolve to the folder itself and yield a
// valid FileStatus.
@Test public void testListSlash() throws Exception {
Path folder=new Path("/testFolder");
Path file=new Path(folder,"testFile");
assertTrue(fs.mkdirs(folder));
assertTrue(fs.createNewFile(file));
FileStatus resolved=fs.getFileStatus(new Path("/testFolder/."));
assertNotNull(resolved);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Names made of URI-reserved characters must round-trip unchanged through
// create, listStatus, getFileStatus, open, and delete.
@Test public void testUriEncodingMoreComplexCharacters() throws Exception {
String fileName="!#$'()*;=[]%";
String directoryName="*;=[]%!#$'()";
fs.create(new Path(directoryName,fileName)).close();
FileStatus[] listing=fs.listStatus(new Path(directoryName));
assertEquals(1,listing.length);
// The listed name must be the raw characters, not a percent-encoded form.
assertEquals(fileName,listing[0].getPath().getName());
FileStatus status=fs.getFileStatus(new Path(directoryName,fileName));
assertEquals(fileName,status.getPath().getName());
InputStream stream=fs.open(new Path(directoryName,fileName));
assertNotNull(stream);
stream.close();
assertTrue(fs.delete(new Path(directoryName,fileName),true));
assertTrue(fs.delete(new Path(directoryName),true));
}
InternalCallVerifier BooleanVerifier
// Creating a deeply nested file must materialize all intermediate folders,
// which inherit the requested permission; deleting the root removes all.
@Test public void testDeepFileCreation() throws Exception {
Path testFile=new Path("deep/file/creation/test");
// NOTE(review): 644 here is decimal, while sibling tests use octal 0644 —
// confirm which permission value is actually intended.
FsPermission permission=FsPermission.createImmutable((short)644);
createEmptyFile(testFile,permission);
assertTrue(fs.exists(testFile));
assertTrue(fs.exists(new Path("deep")));
assertTrue(fs.exists(new Path("deep/file/creation")));
FileStatus ret=fs.getFileStatus(new Path("deep/file"));
assertTrue(ret.isDirectory());
assertEqualsIgnoreStickyBit(permission,ret.getPermission());
assertTrue(fs.delete(new Path("deep"),true));
assertFalse(fs.exists(testFile));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Renaming an implicitly created intermediate folder must carry its whole
// subtree (and permissions) to the new location.
@Test public void testRenameImplicitFolder() throws Exception {
Path testFile=new Path("deep/file/rename/test");
// NOTE(review): 644 here is decimal, while sibling tests use octal 0644 —
// confirm which permission value is actually intended.
FsPermission permission=FsPermission.createImmutable((short)644);
createEmptyFile(testFile,permission);
assertTrue(fs.rename(new Path("deep/file"),new Path("deep/renamed")));
assertFalse(fs.exists(testFile));
FileStatus newStatus=fs.getFileStatus(new Path("deep/renamed/rename/test"));
assertNotNull(newStatus);
assertEqualsIgnoreStickyBit(permission,newStatus.getPermission());
assertTrue(fs.delete(new Path("deep"),true));
}
BranchVerifier InternalCallVerifier BooleanVerifier
// Renames a folder under every RenameFolderVariation (explicit folder only,
// inner file only, or both) and checks the whole subtree moved.
@Test public void testRenameFolder() throws Exception {
for ( RenameFolderVariation variation : RenameFolderVariation.values()) {
Path originalFolder=new Path("folderToRename");
// CreateJustInnerFile leaves the folder implicit (created by the file).
if (variation != RenameFolderVariation.CreateJustInnerFile) {
assertTrue(fs.mkdirs(originalFolder));
}
Path innerFile=new Path(originalFolder,"innerFile");
if (variation != RenameFolderVariation.CreateJustFolder) {
assertTrue(fs.createNewFile(innerFile));
}
Path destination=new Path("renamedFolder");
assertTrue(fs.rename(originalFolder,destination));
assertTrue(fs.exists(destination));
if (variation != RenameFolderVariation.CreateJustFolder) {
assertTrue(fs.exists(new Path(destination,innerFile.getName())));
}
assertFalse(fs.exists(originalFolder));
assertFalse(fs.exists(innerFile));
// Clean up so the next variation starts from an empty namespace.
fs.delete(destination,true);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Byte counters for the wasb scheme: writes and reads are counted, while
// metadata operations such as delete leave the counters untouched.
@Test public void testStatistics() throws Exception {
FileSystem.clearStatistics();
FileSystem.Statistics stats=FileSystem.getStatistics("wasb",NativeAzureFileSystem.class);
assertEquals(0,stats.getBytesRead());
assertEquals(0,stats.getBytesWritten());
Path newFile=new Path("testStats");
// 8 bytes written, nothing read yet.
writeString(newFile,"12345678");
assertEquals(8,stats.getBytesWritten());
assertEquals(0,stats.getBytesRead());
String readBack=readString(newFile);
assertEquals("12345678",readBack);
assertEquals(8,stats.getBytesRead());
assertEquals(8,stats.getBytesWritten());
// delete() must not change either byte counter.
assertTrue(fs.delete(newFile,true));
assertEquals(8,stats.getBytesRead());
assertEquals(8,stats.getBytesWritten());
}
InternalCallVerifier BooleanVerifier
// Renames files with several name shapes (plain, nested, spaces, +/%) and
// checks rename fails while the destination's parent is missing, then
// succeeds once the parent exists.
@Test public void testRename() throws Exception {
for ( RenameVariation variation : RenameVariation.values()) {
System.out.printf("Rename variation: %s\n",variation);
Path originalFile;
switch (variation) {
case NormalFileName:
originalFile=new Path("fileToRename");
break;
case SourceInAFolder:
originalFile=new Path("file/to/rename");
break;
case SourceWithSpace:
originalFile=new Path("file to rename");
break;
case SourceWithPlusAndPercent:
originalFile=new Path("file+to%rename");
break;
default :
throw new Exception("Unknown variation");
}
Path destinationFile=new Path("file/resting/destination");
assertTrue(fs.createNewFile(originalFile));
assertTrue(fs.exists(originalFile));
// Destination parent does not exist yet, so the rename must fail.
assertFalse(fs.rename(originalFile,destinationFile));
assertTrue(fs.mkdirs(destinationFile.getParent()));
assertTrue(fs.rename(originalFile,destinationFile));
assertTrue(fs.exists(destinationFile));
assertFalse(fs.exists(originalFile));
// Clean up so the next variation starts from an empty namespace.
fs.delete(destinationFile.getParent(),true);
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Folder lifecycle: mkdirs creates a 0755 directory, and a recursive delete
// removes both the folder and its contents.
@Test public void testStoreDeleteFolder() throws Exception {
Path testFolder=new Path("storeDeleteFolder");
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
FileStatus status=fs.getFileStatus(testFolder);
assertNotNull(status);
assertTrue(status.isDirectory());
// Newly created folders must carry the default 0755 permission.
assertEquals(new FsPermission((short)0755),status.getPermission());
Path innerFile=new Path(testFolder,"innerFile");
assertTrue(fs.createNewFile(innerFile));
assertTrue(fs.exists(innerFile));
// Recursive delete must take the inner file down with the folder.
assertTrue(fs.delete(testFolder,true));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(testFolder));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Writes a small file and then verifies existence, the default 0644
// permission, and that the content reads back unchanged.
@Test public void testStoreRetrieveFile() throws Exception {
Path file=new Path("unit-test-file");
writeString(file,"Testing");
assertTrue(fs.exists(file));
FileStatus stored=fs.getFileStatus(file);
assertNotNull(stored);
assertEquals(new FsPermission((short)0644),stored.getPermission());
assertEquals("Testing",readString(file));
fs.delete(file,true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A name containing a percent-escape-lookalike ("%5F") must be stored and
// listed literally, not URI-decoded to an underscore.
@Test public void testUriEncoding() throws Exception {
fs.create(new Path("p/t%5Fe")).close();
FileStatus[] listing=fs.listStatus(new Path("p"));
assertEquals(1,listing.length);
assertEquals("t%5Fe",listing[0].getPath().getName());
assertTrue(fs.rename(new Path("p"),new Path("q")));
assertTrue(fs.delete(new Path("q"),true));
}
BooleanVerifier
@Test public void testModifiedTimeForFolder() throws Exception {
  // Folders should report a sensible modification time; the shared helper
  // performs the actual time assertions.
  Path folder = new Path("testFolder");
  assertTrue(fs.mkdirs(folder));
  testModifiedTime(folder);
}
BooleanVerifier
@Test public void testCheckingNonExistentOneLetterFile() throws Exception {
  // A single-character path must not be mistaken for an existing blob.
  assertFalse(fs.exists(new Path("/a")));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCopyFromLocalFileSystem() throws Exception {
  // Copy a file from the local file system into the store and verify the
  // content arrives intact.
  Path localPath = new Path(System.getProperty("test.build.data", "azure_test"));
  FileSystem local = FileSystem.get(new Configuration());
  local.delete(localPath, true);
  try {
    writeString(local, localPath, "Testing");
    Path remotePath = new Path("copiedFromLocal");
    assertTrue(FileUtil.copy(local, localPath, fs, remotePath, false, fs.getConf()));
    assertTrue(fs.exists(remotePath));
    assertEquals("Testing", readString(fs, remotePath));
    fs.delete(remotePath, true);
  } finally {
    // Always remove the local scratch file, even on failure.
    local.delete(localPath, true);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListDirectory() throws Exception {
  // listStatus should track folder contents as they are added, and report
  // directories versus files correctly at each level.
  Path parent = new Path("testingList");
  assertTrue(fs.mkdirs(parent));
  FileStatus[] children = fs.listStatus(parent);
  assertEquals(0, children.length);
  Path subFolder = new Path(parent, "inner");
  assertTrue(fs.mkdirs(subFolder));
  children = fs.listStatus(parent);
  assertEquals(1, children.length);
  assertTrue(children[0].isDirectory());
  Path leafFile = new Path(subFolder, "innerFile");
  writeString(leafFile, "testing");
  // The parent listing is unchanged: the new file is nested one level down.
  children = fs.listStatus(parent);
  assertEquals(1, children.length);
  assertTrue(children[0].isDirectory());
  children = fs.listStatus(subFolder);
  assertEquals(1, children.length);
  assertFalse(children[0].isDirectory());
  assertTrue(fs.delete(parent, true));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testSetPermissionOnFolder() throws Exception {
  // setPermission on a folder should stick and must not change its type.
  Path folder = new Path("testPermission");
  assertTrue(fs.mkdirs(folder));
  FsPermission restricted = new FsPermission((short) 0600);
  fs.setPermission(folder, restricted);
  FileStatus updated = fs.getFileStatus(folder);
  assertNotNull(updated);
  assertEquals(restricted, updated.getPermission());
  assertTrue(updated.isDirectory());
}
InternalCallVerifier BooleanVerifier
@Test public void testTransientErrorOnDelete() throws Exception {
  // A transient failure injected on DELETE requests should be retried, so
  // the rename (which deletes the source blob) must still succeed.
  AzureBlobStorageTestAccount account = AzureBlobStorageTestAccount.create();
  assumeNotNull(account);
  try {
    NativeAzureFileSystem targetFs = account.getFileSystem();
    injectTransientError(targetFs, new ConnectionRecognizer() {
      @Override public boolean isTargetConnection(HttpURLConnection connection) {
        // Only fault the DELETE verb.
        return connection.getRequestMethod().equals("DELETE");
      }
    });
    Path source = new Path("/a/b");
    assertTrue(targetFs.createNewFile(source));
    assertTrue(targetFs.rename(source, new Path("/x")));
  } finally {
    account.cleanup();
  }
}
BooleanVerifier
/**
 * Try accessing an unauthorized or non-existent (treated the same) container
 * from WASB.
 */
@Test public void testAccessUnauthorizedPublicContainer() throws Exception {
  Configuration conf = new Configuration();
  AzureBlobStorageTestAccount.addWasbToConfiguration(conf);
  Path noAccessPath = new Path("wasb://nonExistentContainer@hopefullyNonExistentAccount/someFile");
  // Disable retries so the expected failure surfaces quickly.
  NativeAzureFileSystem.suppressRetryPolicy();
  try {
    FileSystem.get(noAccessPath.toUri(), conf).open(noAccessPath);
    // fail() instead of assertTrue(msg, false): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (AzureException ex) {
    assertTrue("Unexpected message in exception " + ex, ex.getMessage().contains("Unable to access container nonExistentContainer in account" + " hopefullyNonExistentAccount"));
  } finally {
    NativeAzureFileSystem.resumeRetryPolicy();
  }
}
BooleanVerifier
/**
 * Tests that using a store before initialize() trips its internal
 * assertions instead of silently doing work.
 */
@Test public void testNoInitialize() throws Exception {
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  boolean passed = false;
  try {
    store.retrieveMetadata("foo");
    passed = true;
  } catch (AssertionError e) {
    // Expected: the store asserts that it has been initialized.
  }
  // Fixed typo in the failure message ("initalized" -> "initialized").
  assertFalse("Doing an operation on the store should throw if not initialized.", passed);
}
BooleanVerifier
/**
 * Tests that a container stamped with a future (unsupported) version is
 * rejected with a descriptive error.
 */
@Test public void testAccessContainerWithWrongVersion() throws Exception {
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  MockStorageInterface mockStorage = new MockStorageInterface();
  store.setAzureStorageInteractionLayer(mockStorage);
  FileSystem fs = new NativeAzureFileSystem(store);
  try {
    Configuration conf = new Configuration();
    AzureBlobStorageTestAccount.setMockAccountKey(conf);
    // Parameterized map instead of a raw type.
    HashMap<String, String> metadata = new HashMap<String, String>();
    metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY, "2090-04-05");
    mockStorage.addPreExistingContainer(AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
    boolean passed = false;
    try {
      fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
      fs.listStatus(new Path("/"));
      passed = true;
    } catch (AzureException ex) {
      assertTrue("Unexpected exception message: " + ex, ex.getMessage().contains("unsupported version: 2090-04-05."));
    }
    assertFalse("Should've thrown an exception because of the wrong version.", passed);
  } finally {
    fs.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test public void testFirstContainerVersionMetadata() throws Exception {
  // Parameterized map instead of a raw type.
  HashMap<String, String> containerMetadata = new HashMap<String, String>();
  containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY, AzureNativeFileSystemStore.FIRST_WASB_VERSION);
  FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer.create(containerMetadata);
  // Read-only operations must not touch the version metadata.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION, fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  // The first write operation upgrades the container to the current version
  // and removes the old-style key.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION, fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  fsWithContainer.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test public void testPreExistingContainerVersionMetadata() throws Exception {
  FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer.create();
  // Read-only operations leave the container metadata untouched.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertNull(fsWithContainer.getContainerMetadata());
  // The first write stamps the current WASB version into the metadata.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertNotNull(fsWithContainer.getContainerMetadata());
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION, fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  fsWithContainer.close();
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that the container is created lazily: reads against a missing
 * container throw, and only the first write creates it.
 */
@Test public void testContainerCreateOnWrite() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.noneOf(CreateOptions.class));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();
  assertFalse(container.exists());
  try {
    fs.listStatus(new Path("/"));
    // fail() instead of assertTrue(msg, false): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (FileNotFoundException ex) {
    assertTrue("Unexpected exception: " + ex, ex.getMessage().contains("does not exist."));
  }
  assertFalse(container.exists());
  try {
    fs.open(new Path("/foo"));
    fail("Should've thrown.");
  } catch (FileNotFoundException ex) {
    // Expected: the container (and hence the file) does not exist.
  }
  assertFalse(container.exists());
  // A failed rename is not a write and must not create the container.
  assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
  assertFalse(container.exists());
  // The first real write creates the container.
  assertTrue(fs.createNewFile(new Path("/foo")));
  assertTrue(container.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that writing through a SAS to a container that does not exist fails
 * without implicitly creating the container.
 */
@Test public void testContainerChecksWithSas() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();
  assertFalse(container.exists());
  try {
    fs.createNewFile(new Path("/foo"));
    // fail() instead of assertFalse(msg, true): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (AzureException ex) {
    // Expected: a SAS cannot create the missing container.
  }
  assertFalse(container.exists());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests that after a read correctly fails on a missing container, an
 * out-of-band container creation is picked up by subsequent writes.
 */
@Test public void testContainerCreateAfterDoesNotExist() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.noneOf(CreateOptions.class));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();
  assertFalse(container.exists());
  try {
    assertNull(fs.listStatus(new Path("/")));
    // fail() instead of assertTrue(msg, false): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (FileNotFoundException ex) {
    assertTrue("Unexpected exception: " + ex, ex.getMessage().contains("does not exist."));
  }
  assertFalse(container.exists());
  // Create the container behind the file system's back; writes should work.
  container.create();
  assertTrue(fs.createNewFile(new Path("/foo")));
  assertTrue(container.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that after a read correctly fails on a missing container, an
 * out-of-band container plus blob creation is visible to exists().
 */
@Test public void testContainerExistAfterDoesNotExist() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.noneOf(CreateOptions.class));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();
  assertFalse(container.exists());
  try {
    fs.listStatus(new Path("/"));
    // fail() instead of assertTrue(msg, false): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (FileNotFoundException ex) {
    assertTrue("Unexpected exception: " + ex, ex.getMessage().contains("does not exist."));
  }
  assertFalse(container.exists());
  // Create the container and a blob directly through the storage SDK.
  container.create();
  CloudBlockBlob blob = testAccount.getBlobReference("foo");
  BlobOutputStream outputStream = blob.openOutputStream();
  outputStream.write(new byte[10]);
  outputStream.close();
  assertTrue(fs.exists(new Path("/foo")));
  assertTrue(container.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests that while an upload is in progress the visible blob carries a link
 * back to the temporary upload blob, and that the link is removed on close.
 */
@Test public void testLinkBlobs() throws Exception {
  Path filePath = new Path("/inProgress");
  FSDataOutputStream outputStream = fs.create(filePath);
  // Parameterized map instead of a raw type (the raw HashMap made the
  // String assignment below a compile error).
  HashMap<String, String> metadata = backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNotNull(metadata);
  String linkValue = metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
  assertNotNull(linkValue);
  // The temporary upload blob pointed to by the link must exist.
  assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri(linkValue)));
  assertTrue(fs.exists(filePath));
  outputStream.close();
  // After close the link metadata should be gone.
  metadata = backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNull(metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY));
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
* Tests running starting multiple threads all doing various File system
* operations against the same FS.
*/
@Test public void testMultiThreadedOperation() throws Exception {
for (int iter=0; iter < 10; iter++) {
final int numThreads=20;
Thread[] threads=new Thread[numThreads];
final ConcurrentLinkedQueue exceptionsEncountered=new ConcurrentLinkedQueue();
for (int i=0; i < numThreads; i++) {
final Path threadLocalFile=new Path("/myFile" + i);
threads[i]=new Thread(new Runnable(){
@Override public void run(){
try {
assertTrue(!fs.exists(threadLocalFile));
OutputStream output=fs.create(threadLocalFile);
output.write(5);
output.close();
assertTrue(fs.exists(threadLocalFile));
assertTrue(fs.listStatus(new Path("/")).length > 0);
}
catch ( Throwable ex) {
exceptionsEncountered.add(ex);
}
}
}
);
}
for ( Thread t : threads) {
t.start();
}
for ( Thread t : threads) {
t.join();
}
assertTrue("Encountered exceptions: " + StringUtils.join("\r\n",selectToString(exceptionsEncountered)),exceptionsEncountered.isEmpty());
tearDown();
setUp();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Tests rename to a legal destination, and that a destination containing
 * ':' is rejected with an IOException leaving the source intact.
 */
@Test public void testRename() throws Exception {
  Path testFile1 = new Path(root + "/testFile1");
  assertTrue(fs.createNewFile(testFile1));
  Path testFile2 = new Path(root + "/testFile2");
  fs.rename(testFile1, testFile2);
  // Split the compound assertTrue(!a && b) so a failure pinpoints which
  // half of the rename postcondition broke.
  assertFalse(fs.exists(testFile1));
  assertTrue(fs.exists(testFile2));
  Path testFile3 = new Path(root + "/testFile3:3");
  try {
    fs.rename(testFile2, testFile3);
    fail("Should've thrown.");
  } catch (IOException e) {
    // Expected: ':' is not a legal character in the destination name.
  }
  assertTrue(fs.exists(testFile2));
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testCreate() throws Exception {
  // Ordinary names are creatable; a ':' in the name must be rejected.
  Path legalFile = new Path(root + "/testFile1");
  assertTrue(fs.createNewFile(legalFile));
  Path illegalFile = new Path(root + "/testFile2:2");
  try {
    fs.createNewFile(illegalFile);
    fail("Should've thrown.");
  } catch (IOException e) {
    // Expected: ':' is not allowed in WASB file names.
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that WASB fsck reports a clean tree as healthy, and flags a blob
 * with an illegal name injected directly into the backing store.
 */
@Test public void testWasbFsck() throws Exception {
  Path testFolder1 = new Path(root + "/testFolder1");
  assertTrue(fs.mkdirs(testFolder1));
  Path testFolder2 = new Path(testFolder1, "testFolder2");
  assertTrue(fs.mkdirs(testFolder2));
  Path testFolder3 = new Path(testFolder1, "testFolder3");
  assertTrue(fs.mkdirs(testFolder3));
  Path testFile1 = new Path(testFolder2, "testFile1");
  assertTrue(fs.createNewFile(testFile1));
  Path testFile2 = new Path(testFolder1, "testFile2");
  assertTrue(fs.createNewFile(testFile2));
  // A well-formed tree has nothing to report.
  assertFalse(runWasbFsck(testFolder1));
  InMemoryBlockBlobStore backingStore = testAccount.getMockStorage().getBackingStore();
  // Inject a blob whose name contains ':' (illegal in WASB paths) straight
  // into the backing store; parameterized map instead of a raw type.
  backingStore.setContent(AzureBlobStorageTestAccount.toMockUri("testFolder1/testFolder2/test2:2"), new byte[]{1, 2}, new HashMap<String, String>());
  assertTrue(runWasbFsck(testFolder1));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testMkdirs() throws Exception {
  // Ordinary folder names work; a ':' in the name must be rejected.
  Path legalFolder = new Path(root + "/testFolder1");
  assertTrue(fs.mkdirs(legalFolder));
  Path illegalFolder = new Path(root + "/testFolder2:2");
  try {
    assertTrue(fs.mkdirs(illegalFolder));
    fail("Should've thrown.");
  } catch (IOException e) {
    // Expected: ':' is not allowed in WASB folder names.
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testFileInImplicitFolderDeleted() throws Exception {
  // Deleting the only file inside an implicit folder must keep the folder
  // itself visible afterwards.
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  assertTrue(fs.exists(implicitFolder));
  assertTrue(fs.delete(new Path("/root/b"), true));
  assertTrue(fs.exists(implicitFolder));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
* Tests that when we create the file (or folder) x/y/z, we also create
* explicit folder blobs for x and x/y
*/
@Test public void testCreatingDeepFileCreatesExplicitFolder() throws Exception {
for ( DeepCreateTestVariation variation : DeepCreateTestVariation.values()) {
switch (variation) {
case File:
assertTrue(fs.createNewFile(new Path("/x/y/z")));
break;
case Folder:
assertTrue(fs.mkdirs(new Path("/x/y/z")));
break;
}
assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri("x")));
assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri("x/y")));
fs.delete(new Path("/x"),true);
}
}
InternalCallVerifier BooleanVerifier
@Test public void testImplicitFolderDeleted() throws Exception {
  // Recursively deleting an implicit folder must remove it entirely.
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  assertTrue(fs.exists(implicitFolder));
  assertTrue(fs.delete(implicitFolder, true));
  assertFalse(fs.exists(implicitFolder));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testImplicitFolderListed() throws Exception {
  // A blob created out-of-band must show up when listing both the blob's
  // own path and its implicit parent folder.
  createEmptyBlobOutOfBand("root/b");
  FileStatus[] statuses = fs.listStatus(new Path("/root/b"));
  assertNotNull(statuses);
  assertEquals(1, statuses.length);
  assertFalse(statuses[0].isDirectory());
  assertEquals("/root/b", statuses[0].getPath().toUri().getPath());
  statuses = fs.listStatus(new Path("/root"));
  assertNotNull(statuses);
  assertEquals(1, statuses.length);
  assertFalse(statuses[0].isDirectory());
  assertEquals("/root/b", statuses[0].getPath().toUri().getPath());
  // The implicit parent itself must report as a directory.
  FileStatus rootStatus = fs.getFileStatus(new Path("/root"));
  assertNotNull(rootStatus);
  assertTrue(rootStatus.isDirectory());
  assertEquals("/root", rootStatus.getPath().toUri().getPath());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a blob acting simultaneously as a file and as an implicit
 * parent folder is detected as unresolvable on delete.
 */
@Test public void testFileAndImplicitFolderSameName() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  createEmptyBlobOutOfBand("root/b/c");
  FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
  assertEquals(1, listResult.length);
  assertFalse(listResult[0].isDirectory());
  try {
    fs.delete(new Path("/root/b/c"), true);
    // fail() instead of assertTrue(msg, false): clearer intent, same effect.
    fail("Should've thrown.");
  } catch (AzureException e) {
    assertEquals("File /root/b/c has a parent directory /root/b" + " which is also a file. Can't resolve.", e.getMessage());
  }
}
InternalCallVerifier BooleanVerifier
@Test public void outOfBandFolder_uncleMkdirs() throws Exception {
  // Creating a sibling folder next to an out-of-band file's parent should
  // work even though no explicit folder blobs exist on that branch.
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder1/a/input/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
  Path targetFolder = new Path("testFolder1/a/output");
  assertTrue(fs.mkdirs(targetFolder));
}
BooleanVerifier
/**
 * Tests renaming a file that was created out-of-band (directly through the
 * storage SDK, without explicit folder blobs on its branch).
 */
@Test public void outOfBandFolder_rename() throws Exception {
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder4/a/input/file");
  BlobOutputStream s = blob.openOutputStream();
  s.close();
  Path srcFilePath = new Path("testFolder4/a/input/file");
  assertTrue(fs.exists(srcFilePath));
  Path destFilePath = new Path("testFolder4/a/input/file2");
  // Previously the rename outcome was never checked; assert both the
  // return value and the resulting state.
  assertTrue(fs.rename(srcFilePath, destFilePath));
  assertTrue(fs.exists(destFilePath));
  assertFalse(fs.exists(srcFilePath));
}
InternalCallVerifier BooleanVerifier
@Test public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
  // An implicit first-level folder created out-of-band must be visible
  // and recursively deletable.
  CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("/folderW")));
  assertTrue(fs.exists(new Path("/folderW/file")));
  assertTrue(fs.delete(new Path("/folderW"), true));
}
InternalCallVerifier BooleanVerifier
@Test public void outOfBandFolder_rootFileDelete() throws Exception {
  // A root-level blob created out-of-band must be visible and deletable.
  CloudBlockBlob blob = testAccount.getBlobReference("fileY");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("/fileY")));
  assertTrue(fs.delete(new Path("/fileY"), true));
}
BooleanVerifier
@Test public void outOfBandFolder_siblingCreate() throws Exception {
  // Creating a sibling file next to an out-of-band file should succeed
  // even though no explicit folder blobs exist on that branch.
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder3/a/input/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
  Path siblingFile = new Path("testFolder3/a/input/file2");
  FSDataOutputStream siblingStream = fs.create(siblingFile);
  siblingStream.close();
}
InternalCallVerifier BooleanVerifier
@Test public void outOfBandFolder_parentDelete() throws Exception {
  // Deleting the implicit parent of an out-of-band file must succeed.
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder2/a/input/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
  Path parentFolder = new Path("testFolder2/a/input");
  assertTrue(fs.delete(parentFolder, true));
}
BooleanVerifier
/**
 * Tests renaming a root-level file that was created out-of-band (directly
 * through the storage SDK).
 */
@Test public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
  CloudBlockBlob blob = testAccount.getBlobReference("fileX");
  BlobOutputStream s = blob.openOutputStream();
  s.close();
  Path srcFilePath = new Path("/fileX");
  assertTrue(fs.exists(srcFilePath));
  Path destFilePath = new Path("/fileXrename");
  // Previously the rename outcome was never checked; assert both the
  // return value and the resulting state.
  assertTrue(fs.rename(srcFilePath, destFilePath));
  assertTrue(fs.exists(destFilePath));
  assertFalse(fs.exists(srcFilePath));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Tests that we delete dangling files properly
*/
@Test public void testDelete() throws Exception {
Path danglingFile=new Path("/crashedInTheMiddle");
FSDataOutputStream stream=fs.create(danglingFile);
stream.write(new byte[]{1,2,3});
stream.flush();
FileStatus fileStatus=fs.getFileStatus(danglingFile);
assertNotNull(fileStatus);
assertEquals(0,fileStatus.getLen());
assertEquals(1,getNumTempBlobs());
runFsck("-delete");
assertEquals(0,getNumTempBlobs());
assertFalse(fs.exists(danglingFile));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a read-only SAS connection can read back exactly the bytes
 * uploaded directly through the container reference.
 */
@Test public void testConnectUsingSASReadonly() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer, CreateOptions.Readonly));
  assumeNotNull(testAccount);
  final String blobKey = "blobForReadonly";
  CloudBlobContainer container = testAccount.getRealContainer();
  CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
  ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[]{1, 2, 3});
  blob.upload(inputStream, 3);
  inputStream.close();
  Path filePath = new Path("/" + blobKey);
  FileSystem fs = testAccount.getFileSystem();
  assertTrue(fs.exists(filePath));
  byte[] obtained = new byte[3];
  DataInputStream obtainedInputStream = fs.open(filePath);
  obtainedInputStream.readFully(obtained);
  obtainedInputStream.close();
  // Verify the whole payload, not just the final byte as before.
  assertEquals(1, obtained[0]);
  assertEquals(2, obtained[1]);
  assertEquals(3, obtained[2]);
}
BooleanVerifier
@Test public void testConnectToEmulator() throws Exception {
  // Skips (assumption failure) when the local storage emulator is absent.
  testAccount = AzureBlobStorageTestAccount.createForEmulator();
  assumeNotNull(testAccount);
  assertTrue(validateIOStreams(new Path("/testFile")));
}
BooleanVerifier
/**
 * Tests that we can connect to fully qualified accounts outside of
 * blob.core.windows.net
 */
@Test public void testConnectToFullyQualifiedAccountMock() throws Exception {
  Configuration conf = new Configuration();
  AzureBlobStorageTestAccount.setMockAccountKey(conf, "mockAccount.mock.authority.net");
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  MockStorageInterface mockStorage = new MockStorageInterface();
  store.setAzureStorageInteractionLayer(mockStorage);
  NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
  fs.initialize(new URI("wasb://mockContainer@mockAccount.mock.authority.net"), conf);
  try {
    fs.createNewFile(new Path("/x"));
    assertTrue(mockStorage.getBackingStore().exists("http://mockAccount.mock.authority.net/mockContainer/x"));
  } finally {
    // Close in finally so the FS is released even if an assertion fails.
    fs.close();
  }
}
BooleanVerifier
@Test public void testConnectUsingSAS() throws Exception {
  // A SAS-based connection should answer simple existence queries.
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
  assumeNotNull(testAccount);
  assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
}
BooleanVerifier
@Test public void testConnectUsingKey() throws Exception {
  // Account-key based connection should support full stream round trips.
  testAccount = AzureBlobStorageTestAccount.create();
  assumeNotNull(testAccount);
  assertTrue(validateIOStreams(new Path("/wasb_scheme")));
}
InternalCallVerifier BooleanVerifier
@Test public void testMultipleContainers() throws Exception {
  // Two containers backed by the same path must keep their data isolated.
  AzureBlobStorageTestAccount firstAccount = AzureBlobStorageTestAccount.create("first"), secondAccount = AzureBlobStorageTestAccount.create("second");
  assumeNotNull(firstAccount);
  assumeNotNull(secondAccount);
  try {
    FileSystem firstFs = firstAccount.getFileSystem(), secondFs = secondAccount.getFileSystem();
    Path sharedName = new Path("/testWasb");
    assertTrue(validateIOStreams(firstFs, sharedName));
    assertTrue(validateIOStreams(secondFs, sharedName));
    // Same path, different containers: writes must not bleed across.
    writeSingleByte(firstFs, sharedName, 5);
    writeSingleByte(secondFs, sharedName, 7);
    assertSingleByteValue(firstFs, sharedName, 5);
    assertSingleByteValue(secondFs, sharedName, 7);
  } finally {
    firstAccount.cleanup();
    secondAccount.cleanup();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMetricsOnMkdirList() throws Exception {
  // Creating one directory should cost a small bounded number of web
  // requests and bump the directories-created counter exactly once.
  long responses = getBaseWebResponses();
  assertTrue(fs.mkdirs(new Path("a")));
  responses = assertWebResponsesInRange(responses, 1, 12);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
  // Listing the root should take exactly one web request.
  assertEquals(1, fs.listStatus(new Path("/")).length);
  responses = assertWebResponsesEquals(responses, 1);
  assertNoErrors();
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a delete blocked by a lease is counted as a client error (not
 * a server error) in the metrics.
 */
@Test public void testClientErrorMetrics() throws Exception {
  String directoryName = "metricsTestDirectory_ClientError";
  Path directoryPath = new Path("/" + directoryName);
  assertTrue(fs.mkdirs(directoryPath));
  // Acquire a lease so the delete below is rejected by the service.
  String leaseID = testAccount.acquireShortLease(directoryName);
  try {
    try {
      fs.delete(directoryPath, true);
      // fail() instead of assertTrue(msg, false): clearer intent, same effect.
      fail("Should've thrown.");
    } catch (AzureException ex) {
      assertTrue("Unexpected exception: " + ex, ex.getMessage().contains("lease"));
    }
    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
  } finally {
    testAccount.releaseLease(leaseID, directoryName);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMetricsOnFileRename() throws Exception {
  // Creating and renaming a file should each cost a small bounded number
  // of web requests and bump the files-created counter exactly once.
  long responses = getBaseWebResponses();
  Path sourcePath = new Path("/metricsTest_RenameStart");
  Path targetPath = new Path("/metricsTest_RenameFinal");
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
  assertTrue(fs.createNewFile(sourcePath));
  logOpResponseCount("Creating an empty file", responses);
  responses = assertWebResponsesInRange(responses, 2, 20);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
  assertTrue(fs.rename(sourcePath, targetPath));
  logOpResponseCount("Renaming a file", responses);
  responses = assertWebResponsesInRange(responses, 2, 15);
  assertNoErrors();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a 1 KB file and reads it back, verifying the byte counters and
 * the rate/latency gauges against the observed end-to-end durations.
 */
@Test public void testMetricsOnFileCreateRead() throws Exception {
  long base = getBaseWebResponses();
  assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
  Path filePath = new Path("/metricsTest_webResponses");
  final int FILE_SIZE = 1000;
  // Suppress auto-update so the gauges only move when triggered below.
  getBandwidthGaugeUpdater().suppressAutoUpdate();
  Date start = new Date();
  OutputStream outputStream = fs.create(filePath);
  outputStream.write(nonZeroByteArray(FILE_SIZE));
  outputStream.close();
  // Clamp to at least 1 ms: a sub-millisecond upload would otherwise make
  // the expected-rate division below throw ArithmeticException (divide by zero).
  long uploadDurationMs = Math.max(1, new Date().getTime() - start.getTime());
  logOpResponseCount("Creating a 1K file", base);
  base = assertWebResponsesInRange(base, 2, 15);
  getBandwidthGaugeUpdater().triggerUpdate(true);
  long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
  assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
  long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
  assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
  long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
  System.out.println("Upload rate: " + uploadRate + " bytes/second.");
  long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
  assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate);
  long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_LATENCY);
  System.out.println("Upload latency: " + uploadLatency);
  long expectedLatency = uploadDurationMs;
  assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
  assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency);
  // Now read the file back one byte at a time, counting the bytes.
  start = new Date();
  InputStream inputStream = fs.open(filePath);
  int count = 0;
  while (inputStream.read() >= 0) {
    count++;
  }
  inputStream.close();
  // Same divide-by-zero guard as on the upload path.
  long downloadDurationMs = Math.max(1, new Date().getTime() - start.getTime());
  assertEquals(FILE_SIZE, count);
  logOpResponseCount("Reading a 1K file", base);
  base = assertWebResponsesInRange(base, 1, 10);
  getBandwidthGaugeUpdater().triggerUpdate(false);
  long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
  assertEquals(FILE_SIZE, totalBytesRead);
  long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
  assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
  long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
  System.out.println("Download rate: " + downloadRate + " bytes/second.");
  expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
  assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate);
  long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_LATENCY);
  System.out.println("Download latency: " + downloadLatency);
  expectedLatency = downloadDurationMs;
  assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
  assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency);
  assertNoErrors();
}
BooleanVerifier EqualityVerifier HybridVerifier
// Writes a 100 MB file and reads it back, verifying the total byte
// counters and that the rate/latency gauges register activity. No exact
// rate bounds are asserted for the big file (durations vary too much).
@Test public void testMetricsOnBigFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=100 * 1024 * 1024;
// Suppress auto-update so the gauges only move when triggered below.
getBandwidthGaugeUpdater().suppressAutoUpdate();
OutputStream outputStream=fs.create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
logOpResponseCount("Creating a 100 MB file",base);
// A 100 MB upload is expected to be split into multiple block uploads.
base=assertWebResponsesInRange(base,20,50);
getBandwidthGaugeUpdater().triggerUpdate(true);
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
// Read the file back one byte at a time, counting the bytes received.
InputStream inputStream=fs.open(filePath);
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 100 MB file",base);
base=assertWebResponsesInRange(base,20,40);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testMetricsOnDirRename() throws Exception {
  // Build a directory holding one file, then verify that renaming the
  // directory costs a bounded number of web responses.
  long responseMark = getBaseWebResponses();
  Path srcDir = new Path("/metricsTestDirectory_RenameStart");
  Path srcInnerFile = new Path(srcDir, "innerFile");
  Path targetDir = new Path("/metricsTestDirectory_RenameFinal");
  assertTrue(fs.mkdirs(srcDir));
  responseMark = getCurrentWebResponses();
  assertTrue(fs.createNewFile(srcInnerFile));
  responseMark = getCurrentWebResponses();
  assertTrue(fs.rename(srcDir, targetDir));
  logOpResponseCount("Renaming a directory", responseMark);
  responseMark = assertWebResponsesInRange(responseMark, 1, 20);
  assertNoErrors();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMetricsOnFileExistsDelete() throws Exception {
  // Measure the web-response cost of exists() and delete(), and check the
  // files-deleted counter before and after the delete.
  long mark = getBaseWebResponses();
  Path target = new Path("/metricsTest_delete");
  assertFalse(fs.exists(target));
  logOpResponseCount("Checking file existence for non-existent file", mark);
  mark = assertWebResponsesInRange(mark, 1, 3);
  assertTrue(fs.createNewFile(target));
  mark = getCurrentWebResponses();
  assertTrue(fs.exists(target));
  logOpResponseCount("Checking file existence for existent file", mark);
  mark = assertWebResponsesInRange(mark, 1, 2);
  // Nothing deleted yet.
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
  assertTrue(fs.delete(target, false));
  logOpResponseCount("Deleting a file", mark);
  mark = assertWebResponsesInRange(mark, 1, 4);
  // Exactly one delete should now be recorded.
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
  assertNoErrors();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSingleThreaded() throws Exception {
  // Exercise BandwidthGaugeUpdater with manual triggerUpdate() calls.
  AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(new Configuration());
  BandwidthGaugeUpdater updater = new BandwidthGaugeUpdater(instrumentation, 1000, true);
  // Fix: close the updater in a finally block so its background resources
  // are released even when an assertion fails mid-test.
  try {
    updater.triggerUpdate(true);
    assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
    // An upload reported within the current window counts fully.
    updater.blockUploaded(new Date(), new Date(), 150);
    updater.triggerUpdate(true);
    assertEquals(150, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
    // An upload spread over 10 seconds contributes only its per-second share.
    updater.blockUploaded(new Date(new Date().getTime() - 10000), new Date(), 200);
    updater.triggerUpdate(true);
    long currentBytes = AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation);
    assertTrue("We expect around (200/10 = 20) bytes written as the gauge value." + "Got " + currentBytes, currentBytes > 18 && currentBytes < 22);
  } finally {
    updater.close();
  }
}
BooleanVerifier AssumptionSetter HybridVerifier
// Spawns several mock WASB filesystems, drops the references, and checks that
// finalization tears down the background updater threads.
// NOTE(review): depends on System.gc()/runFinalization() actually collecting
// the filesystems, which the JVM does not guarantee — inherently flaky.
@Test public void testFinalizerThreadShutdown() throws Exception {
System.gc();
System.runFinalization();
// Baseline: no WASB helper threads may be alive before the test starts.
int nUpdaterThreadsStart=getWasbThreadCount();
assertTrue("Existing WASB threads have not been cleared",nUpdaterThreadsStart == 0);
final int nFilesystemsToSpawn=10;
AzureBlobStorageTestAccount testAccount=null;
// Each mock account/filesystem is expected to spawn one background thread;
// only the last account stays strongly referenced.
for (int i=0; i < nFilesystemsToSpawn; i++) {
testAccount=AzureBlobStorageTestAccount.createMock();
testAccount.getFileSystem();
}
int nUpdaterThreadsAfterSpawn=getWasbThreadCount();
// Assume (not assert): if the threads never spawned, skip rather than fail.
Assume.assumeTrue("Background threads should have spawned.",nUpdaterThreadsAfterSpawn == 10);
// Drop the remaining strong reference so all accounts become collectable.
testAccount=null;
System.gc();
System.runFinalization();
int nUpdaterThreadsAfterCleanup=getWasbThreadCount();
assertTrue("Finalizers should have reduced the thread count. ",nUpdaterThreadsAfterCleanup == 0);
}
BooleanVerifier
@Test public void testMetricsSourceNames(){
  // Each call must produce a fresh, unique metrics source name sharing the
  // common prefix.
  String name1 = NativeAzureFileSystem.newMetricsSourceName();
  String name2 = NativeAzureFileSystem.newMetricsSourceName();
  assertTrue(name1.startsWith("AzureFileSystemMetrics"));
  assertTrue(name2.startsWith("AzureFileSystemMetrics"));
  // Fix: assertFalse with a message instead of assertTrue(!...), which gave
  // no diagnostic on failure.
  assertFalse("Metrics source names should be unique", name1.equals(name2));
}
BooleanVerifier
@Test public void testDeleteNonexistentPathNonRecursive() throws Throwable {
  Path path = path("testDeleteNonexistentPathNonRecursive");
  ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
  ContractTestUtils.rejectRootOperation(path);
  // Fix: delete(path, false) is the NON-recursive form, but the failure
  // message claimed "recursively delete", mislabelling any failure.
  assertFalse("Returned true attempting to non-recursively delete" + " a nonexistent path " + path, getFileSystem().delete(path, false));
}
BooleanVerifier
@Test public void testDeleteNonexistentPathRecursive() throws Throwable {
  Path path = path("testDeleteNonexistentPathRecursive");
  ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
  ContractTestUtils.rejectRootOperation(path);
  // Fix: this is the *recursive* variant, but the original passed
  // recursive=false, duplicating the non-recursive test instead of covering
  // FileSystem.delete(path, true).
  assertFalse("Returned true attempting to recursively delete" + " a nonexistent path " + path, getFileSystem().delete(path, true));
}
APIUtilityVerifier BooleanVerifier
@Test public void testRenameDirIntoExistingDir() throws Throwable {
  describe("Verify renaming a dir into an existing dir puts it underneath" + " and leaves existing files alone");
  FileSystem fs = getFileSystem();
  // Source directory containing one small file.
  String srcSubdirName = "source";
  Path srcDir = path(srcSubdirName);
  Path srcFile = new Path(srcDir, "source-256.txt");
  byte[] sourceBytes = dataset(256, 'a', 'z');
  writeDataset(fs, srcFile, sourceBytes, sourceBytes.length, 1024, false);
  // Destination directory that already contains its own file.
  Path destDir = path("dest");
  Path destFile = new Path(destDir, "dest-512.txt");
  byte[] destBytes = dataset(512, 'A', 'Z');
  writeDataset(fs, destFile, destBytes, destBytes.length, 1024, false);
  assertIsFile(destFile);
  boolean renamed = rename(srcDir, destDir);
  // After the rename, the source dir should sit underneath the destination,
  // and the destination's pre-existing file must be untouched.
  Path relocatedSrc = new Path(destDir, srcSubdirName);
  assertIsFile(destFile);
  assertIsDirectory(relocatedSrc);
  ContractTestUtils.verifyFileContents(fs, destFile, destBytes);
  assertTrue("rename returned false though the contents were copied", renamed);
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Rename test - handles filesystems that will overwrite the destination
 * as well as those that do not (i.e. HDFS).
 * Afterwards verifies the destination holds either the original data
 * (rename rejected) or the source data (rename overwrote).
 * @throws Throwable
 */
@Test public void testRenameFileOverExistingFile() throws Throwable {
describe("Verify renaming a file onto an existing file matches expectations");
Path srcFile=path("source-256.txt");
byte[] srcData=dataset(256,'a','z');
writeDataset(getFileSystem(),srcFile,srcData,srcData.length,1024,false);
Path destFile=path("dest-512.txt");
byte[] destData=dataset(512,'A','Z');
writeDataset(getFileSystem(),destFile,destData,destData.length,1024,false);
assertIsFile(destFile);
boolean renameOverwritesDest=isSupported(RENAME_OVERWRITES_DEST);
// NOTE(review): the variable name says "returns false ... dest exists" yet the
// option is negated here — confirm against the contract-option definition.
boolean renameReturnsFalseOnRenameDestExists=!isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
// Tracks which dataset the destination is expected to hold at the end.
boolean destUnchanged=true;
try {
boolean renamed=rename(srcFile,destFile);
if (renameOverwritesDest) {
// Overwriting filesystems must report success and replace the contents.
assertTrue("Rename returned false",renamed);
destUnchanged=false;
}
 else {
// Non-overwriting filesystems must either return false or throw.
if (renamed && !renameReturnsFalseOnRenameDestExists) {
String destDirLS=generateAndLogErrorListing(srcFile,destFile);
getLog().error("dest dir {}",destDirLS);
fail("expected rename(" + srcFile + ", "+ destFile+ " ) to fail,"+ " but got success and destination of "+ destDirLS);
}
}
}
 catch ( FileAlreadyExistsException e) {
// Acceptable alternative to returning false.
handleExpectedException(e);
}
// Destination holds the original data unless the rename overwrote it.
ContractTestUtils.verifyFileContents(getFileSystem(),destFile,destUnchanged ? destData : srcData);
}
APIUtilityVerifier BooleanVerifier
@Test public void testRenameNewFileSameDir() throws Throwable {
  describe("rename a file into a new file in the same directory");
  Path src = path("rename_src");
  Path dst = path("rename_dest");
  byte[] payload = dataset(256, 'a', 'z');
  writeDataset(getFileSystem(), src, payload, payload.length, 1024 * 1024, false);
  boolean renamed = rename(src, dst);
  assertTrue("rename(" + src + ", " + dst + ") returned false", renamed);
  // The target must be listable in its parent and hold the original bytes.
  ContractTestUtils.assertListStatusFinds(getFileSystem(), dst.getParent(), dst);
  ContractTestUtils.verifyFileContents(getFileSystem(), dst, payload);
}
APIUtilityVerifier BranchVerifier BooleanVerifier
@Test public void testRenameFileNonexistentDir() throws Throwable {
  // Fix: the describe() text was a stale copy-paste from the same-directory
  // test; this test renames into a nonexistent subdirectory.
  describe("rename a file into a new file in a nonexistent subdirectory");
  Path renameSrc = path("testRenameSrc");
  Path renameTarget = path("subdir/testRenameTarget");
  byte[] data = dataset(256, 'a', 'z');
  writeDataset(getFileSystem(), renameSrc, data, data.length, 1024 * 1024, false);
  boolean renameCreatesDestDirs = isSupported(RENAME_CREATES_DEST_DIRS);
  try {
    boolean rename = rename(renameSrc, renameTarget);
    if (renameCreatesDestDirs) {
      // The filesystem auto-creates parents: data must be at the target.
      assertTrue(rename);
      ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data);
    }
    else {
      // The rename is rejected: source must be untouched.
      assertFalse(rename);
      ContractTestUtils.verifyFileContents(getFileSystem(), renameSrc, data);
    }
  }
  catch ( FileNotFoundException e) {
    // Throwing instead of returning false is only valid when the FS does
    // not create destination directories.
    assertFalse(renameCreatesDestDirs);
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testRenameNonexistentFile() throws Throwable {
  // Fix: the describe() text was a stale copy-paste from the same-directory
  // rename test; this test renames a file that does not exist.
  describe("rename a nonexistent file, expecting an error or a false return");
  Path missing = path("testRenameNonexistentFileSrc");
  Path target = path("testRenameNonexistentFileDest");
  boolean renameReturnsFalseOnFailure = isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
  mkdirs(missing.getParent());
  try {
    boolean renamed = rename(missing, target);
    if (!renameReturnsFalseOnFailure) {
      // This FS contract promises an exception for a missing source.
      String destDirLS = generateAndLogErrorListing(missing, target);
      fail("expected rename(" + missing + ", "+ target+ " ) to fail,"+ " got a result of "+ renamed+ " and a destination directory of "+ destDirLS);
    }
    else {
      // This FS contract promises a false return instead.
      getLog().warn("Rename returned {} renaming a nonexistent file", renamed);
      assertFalse("Renaming a missing file returned true", renamed);
    }
  }
  catch ( FileNotFoundException e) {
    if (renameReturnsFalseOnFailure) {
      ContractTestUtils.fail("Renaming a missing file unexpectedly threw an exception", e);
    }
    handleExpectedException(e);
  }
  catch ( IOException e) {
    // Some filesystems throw a broader IOException; accept it leniently.
    handleRelaxedException("rename nonexistent file", "FileNotFoundException", e);
  }
  // Whatever the outcome, no destination file may have been created.
  assertPathDoesNotExist("rename nonexistent file created a destination file", target);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSeekAndReadPastEndOfFile() throws Throwable {
  describe("verify that reading past the last bytes in the file returns -1");
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte, drain the final two bytes, then the
  // next read must signal EOF with -1.
  instream.seek(TEST_FILE_LEN - 2);
  for (int remaining = 2; remaining > 0; remaining--) {
    assertTrue("Premature EOF", instream.read() != -1);
  }
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe("verify that a positioned read does not change the getPos() value");
Path testSeekFile=path("bigseekfile.txt");
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
// The stream position is now 40000; the positioned read below must not move it.
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
// NOTE(review): the return value of this positioned read is ignored — a short
// read would make the loop below fail confusingly; consider readFully.
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
// The sequential read continues from the pre-positioned-read offset.
assertEquals("@40000",block[40000],(byte)instream.read());
// The buffer must hold bytes 128..383 of the dataset.
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
describe("do a seek past the EOF, then verify the stream recovers");
instream=getFileSystem().open(smallSeekFile);
// Contract option decides whether seeking beyond EOF is allowed or must throw.
boolean canSeekPastEOF=!getContract().isSupported(ContractOptions.REJECTS_SEEK_PAST_EOF,true);
try {
instream.seek(TEST_FILE_LEN + 1);
// If the seek was allowed, the subsequent read must report EOF via -1.
assertMinusOne("read after seeking past EOF",instream.read());
}
 catch ( EOFException e) {
// An EOFException is only acceptable when seeking past EOF is unsupported.
if (canSeekPastEOF) {
throw e;
}
handleExpectedException(e);
}
 catch ( IOException e) {
// Broader IOExceptions are tolerated leniently for rejecting filesystems.
if (canSeekPastEOF) {
throw e;
}
handleRelaxedException("a seek past the end of the file","EOFException",e);
}
// Regardless of the outcome above, the stream must still be usable.
instream.seek(1);
assertTrue("Premature EOF",instream.read() != -1);
}
BooleanVerifier NullVerifier HybridVerifier
@Test public void testContractWorks() throws Throwable {
  // Sanity check: the atomic-rename option must be present in the contract
  // configuration and resolve to true.
  final String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME);
  String rawValue = getContract().getConf().get(key);
  assertNotNull("not set: " + key, rawValue);
  assertTrue("not true: " + key, getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false));
}
BooleanVerifier
@Test public void testDeleteEmptyPath() throws Throwable {
  // Deleting a path that was never created must report false, not throw.
  File missing = new File(testDirectory, "testDeleteEmptyPath");
  assertFalse(missing.exists());
  assertFalse("nonexistent.delete() returned true", missing.delete());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
 *
 * - Initial status with no ACLs
 * - The addition of a default ACL
 * - The removal of default ACLs
 *
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testDirAcls() throws Exception {
final String defUser1="default:user:glarch:r-x";
final String defSpec1="aclspec=" + defUser1;
final String dir="/aclDirTest";
String statusJson;
List aclEntries;
createHttpFSServer(false);
FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
// Phase 1: fresh directory — no aclBit flag, empty ACL entry list.
statusJson=getStatus(dir,"GETFILESTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
// Phase 2: set a default ACL — aclBit appears; the single default entry
// expands to 5 entries (the named entry plus generated default entries).
putCmd(dir,"SETACL",defSpec1);
statusJson=getStatus(dir,"GETFILESTATUS");
Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 5);
Assert.assertTrue(aclEntries.contains(defUser1));
// Phase 3: remove all default ACLs — back to no aclBit and no entries.
putCmd(dir,"REMOVEDEFAULTACL",null);
statusJson=getStatus(dir,"GETFILESTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
}
BooleanVerifier
/**
 * Validate that files are created with 755 permissions when no
 * 'permissions' attribute is specified, and that an explicitly supplied
 * 'permissions' value is honored.
 */
@Test @TestDir @TestJetty @TestHdfs public void testPerms() throws Exception {
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/perm"));
  // Default case: no 'permissions' parameter => 755.
  createWithHttp("/perm/none", null);
  String statusJson = getStatus("/perm/none", "GETFILESTATUS");
  // Fix: assertEquals reports expected vs actual on failure; the original
  // assertTrue("755".equals(...)) hid the actual permission string.
  Assert.assertEquals("755", getPerms(statusJson));
  createWithHttp("/perm/p-777", "777");
  statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
  Assert.assertEquals("777", getPerms(statusJson));
  createWithHttp("/perm/p-654", "654");
  statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
  Assert.assertEquals("654", getPerms(statusJson));
  createWithHttp("/perm/p-321", "321");
  statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
  Assert.assertEquals("321", getPerms(statusJson));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @TestDir @TestJetty @TestHdfs public void instrumentation() throws Exception {
  createHttpFSServer(false);
  // An unauthorised caller must be rejected from the instrumentation op.
  URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // Fix: JUnit assertEquals takes (expected, actual); the original had the
  // arguments swapped, producing misleading failure messages.
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
  // A configured Hadoop user receives the JSON counters payload.
  url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
  String line = reader.readLine();
  reader.close();
  Assert.assertTrue(line.contains("\"counters\":{"));
  // Instrumentation is only valid on the root path; any other path is a 400.
  url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate the various ACL set/modify/remove calls. General strategy is
 * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
 * and GETACLSTATUS:
 *
 * - Create a file with no ACLs
 * - Add a user + group ACL
 * - Add another user ACL
 * - Remove the first user ACL
 * - Remove all ACLs
 *
 */
@Test @TestDir @TestJetty @TestHdfs public void testFileAcls() throws Exception {
final String aclUser1="user:foo:rw-";
final String aclUser2="user:bar:r--";
final String aclGroup1="group::r--";
final String aclSpec="aclspec=user::rwx," + aclUser1 + ","+ aclGroup1+ ",other::---";
final String modAclSpec="aclspec=" + aclUser2;
final String remAclSpec="aclspec=" + aclUser1;
final String dir="/aclFileTest";
final String path=dir + "/test";
String statusJson;
List aclEntries;
createHttpFSServer(false);
FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path,null);
// Phase 1: new file — no aclBit in file status or parent listing, no entries.
statusJson=getStatus(path,"GETFILESTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"LISTSTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(path,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
// Phase 2: SETACL with a named user + group — aclBit appears everywhere and
// the two named entries are reported (the owner/other entries are implicit).
putCmd(path,"SETACL",aclSpec);
statusJson=getStatus(path,"GETFILESTATUS");
Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"LISTSTATUS");
Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(path,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 2);
Assert.assertTrue(aclEntries.contains(aclUser1));
Assert.assertTrue(aclEntries.contains(aclGroup1));
// Phase 3: MODIFYACLENTRIES adds a second named user.
putCmd(path,"MODIFYACLENTRIES",modAclSpec);
statusJson=getStatus(path,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 3);
Assert.assertTrue(aclEntries.contains(aclUser1));
Assert.assertTrue(aclEntries.contains(aclUser2));
Assert.assertTrue(aclEntries.contains(aclGroup1));
// Phase 4: REMOVEACLENTRIES drops the first named user only.
putCmd(path,"REMOVEACLENTRIES",remAclSpec);
statusJson=getStatus(path,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 2);
Assert.assertTrue(aclEntries.contains(aclUser2));
Assert.assertTrue(aclEntries.contains(aclGroup1));
// Phase 5: REMOVEACL clears everything; aclBit disappears again.
putCmd(path,"REMOVEACL",null);
statusJson=getStatus(path,"GETACLSTATUS");
aclEntries=getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
statusJson=getStatus(path,"GETFILESTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
statusJson=getStatus(dir,"LISTSTATUS");
Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies the equals() contract of the ENTRY fixtures: distinct instances,
// reflexivity, symmetry for the equal pair (ENTRY1/ENTRY2), inequality for
// the rest, and safe behavior against null and foreign types.
@Test public void testEntryEquals(){
// All four fixtures must be distinct object instances.
assertNotSame(ENTRY1,ENTRY2);
assertNotSame(ENTRY1,ENTRY3);
assertNotSame(ENTRY1,ENTRY4);
assertNotSame(ENTRY2,ENTRY3);
assertNotSame(ENTRY2,ENTRY4);
assertNotSame(ENTRY3,ENTRY4);
// Reflexive.
assertEquals(ENTRY1,ENTRY1);
assertEquals(ENTRY2,ENTRY2);
// ENTRY1 and ENTRY2 are value-equal, in both directions (symmetry).
assertEquals(ENTRY1,ENTRY2);
assertEquals(ENTRY2,ENTRY1);
// The remaining combinations differ.
assertFalse(ENTRY1.equals(ENTRY3));
assertFalse(ENTRY1.equals(ENTRY4));
assertFalse(ENTRY3.equals(ENTRY4));
// equals(null) and equals(other type) must return false, not throw.
assertFalse(ENTRY1.equals(null));
assertFalse(ENTRY1.equals(new Object()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Equal STATUS objects must share a hash code; the unequal fixture is
// expected (by construction of the fixtures) to hash differently.
@Test public void testStatusHashCode(){
assertEquals(STATUS1.hashCode(),STATUS2.hashCode());
assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies the equals() contract of the STATUS fixtures: distinct instances,
// reflexivity, symmetry for STATUS1/STATUS2, inequality for STATUS3, and
// safe behavior against null and foreign types.
@Test public void testStatusEquals(){
// All three fixtures must be distinct object instances.
assertNotSame(STATUS1,STATUS2);
assertNotSame(STATUS1,STATUS3);
assertNotSame(STATUS2,STATUS3);
// Reflexive.
assertEquals(STATUS1,STATUS1);
assertEquals(STATUS2,STATUS2);
// STATUS1 and STATUS2 are value-equal, in both directions (symmetry).
assertEquals(STATUS1,STATUS2);
assertEquals(STATUS2,STATUS1);
// STATUS3 differs from both.
assertFalse(STATUS1.equals(STATUS3));
assertFalse(STATUS2.equals(STATUS3));
// equals(null) and equals(other type) must return false, not throw.
assertFalse(STATUS1.equals(null));
assertFalse(STATUS1.equals(new Object()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Equal ENTRY objects must share a hash code; the unequal fixtures are
// expected (by construction of the fixtures) to hash differently.
@Test public void testEntryHashCode(){
assertEquals(ENTRY1.hashCode(),ENTRY2.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}
InternalCallVerifier BooleanVerifier
/**
 * Ensure that when we set a sticky bit and shut down the file system, we get
 * the sticky bit back on re-start, and that no extra sticky bits appear after
 * re-start.
 */
@Test public void testStickyBitPersistence() throws Exception {
  Path withSticky = new Path("/Housemartins");
  Path unspecified = new Path("/INXS");
  Path withoutSticky = new Path("/Easyworld");
  for (Path dir : new Path[] { withSticky, unspecified, withoutSticky }) {
    hdfs.mkdirs(dir);
  }
  // One dir with the sticky bit, one explicitly without, one untouched.
  hdfs.setPermission(withSticky, new FsPermission((short) 01777));
  hdfs.setPermission(withoutSticky, new FsPermission((short) 00777));
  // Restart without reformatting so the persisted state is replayed.
  shutdown();
  initCluster(false);
  assertTrue(hdfs.exists(withSticky));
  assertTrue(hdfs.getFileStatus(withSticky).getPermission().getStickyBit());
  assertTrue(hdfs.exists(unspecified));
  assertFalse(hdfs.getFileStatus(unspecified).getPermission().getStickyBit());
  assertTrue(hdfs.exists(withoutSticky));
  assertFalse(hdfs.getFileStatus(withoutSticky).getPermission().getStickyBit());
}
InternalCallVerifier BooleanVerifier
// Same persistence check as testStickyBitPersistence, but with ACLs applied
// to the permission-bearing directories before the restart.
@Test public void testAclStickyBitPersistence() throws Exception {
  Path withSticky = new Path("/Housemartins");
  Path unspecified = new Path("/INXS");
  Path withoutSticky = new Path("/Easyworld");
  for (Path dir : new Path[] { withSticky, unspecified, withoutSticky }) {
    hdfs.mkdirs(dir);
  }
  hdfs.setPermission(withSticky, new FsPermission((short) 01777));
  applyAcl(withSticky);
  hdfs.setPermission(withoutSticky, new FsPermission((short) 00777));
  applyAcl(withoutSticky);
  // Restart without reformatting so the persisted state is replayed.
  shutdown();
  initCluster(false);
  assertTrue(hdfs.exists(withSticky));
  assertTrue(hdfs.getFileStatus(withSticky).getPermission().getStickyBit());
  assertTrue(hdfs.exists(unspecified));
  assertFalse(hdfs.getFileStatus(unspecified).getPermission().getStickyBit());
  assertTrue(hdfs.exists(withoutSticky));
  assertFalse(hdfs.getFileStatus(withoutSticky).getPermission().getStickyBit());
}
BooleanVerifier
// "user:user1:" lacks the permission component; parsing must reject it and
// the setfacl command using it must fail.
@Test public void testSetfaclValidationsWithoutPermissions() throws Exception {
List parsedList=new ArrayList();
try {
parsedList=AclEntry.parseAclSpec("user:user1:",true);
}
catch ( IllegalArgumentException e) {
// Expected: the malformed spec must not parse; parsedList stays empty.
}
assertTrue(parsedList.size() == 0);
assertFalse("setfacl should fail with less arguments",0 == runCommand(new String[]{"-setfacl","-m","user:user1:","/path"}));
}
BooleanVerifier
@Test public void testGetfaclValidations() throws Exception {
  // getfacl requires exactly one path argument: none and two are both errors.
  String[] noPath = new String[]{"-getfacl"};
  assertFalse("getfacl should fail without path", 0 == runCommand(noPath));
  String[] extraArg = new String[]{"-getfacl", "/test", "extraArg"};
  assertFalse("getfacl should fail with extra argument", 0 == runCommand(extraArg));
}
BooleanVerifier
// Each malformed setfacl invocation must return a non-zero exit code.
@Test public void testSetfaclValidations() throws Exception {
assertFalse("setfacl should fail without path",0 == runCommand(new String[]{"-setfacl"}));
assertFalse("setfacl should fail without aclSpec",0 == runCommand(new String[]{"-setfacl","-m","/path"}));
// NOTE(review): these arguments duplicate the previous case and contain no
// conflicting options — looks like a copy-paste; confirm the intended args.
assertFalse("setfacl should fail with conflicting options",0 == runCommand(new String[]{"-setfacl","-m","/path"}));
assertFalse("setfacl should fail with extra arguments",0 == runCommand(new String[]{"-setfacl","/path","extra"}));
assertFalse("setfacl should fail with extra arguments",0 == runCommand(new String[]{"-setfacl","--set","default:user::rwx","/path","extra"}));
// -x removes entries, so a permission component in the spec is invalid.
assertFalse("setfacl should fail with permissions for -x",0 == runCommand(new String[]{"-setfacl","-x","user:user1:rwx","/path"}));
assertFalse("setfacl should fail ACL spec missing",0 == runCommand(new String[]{"-setfacl","-m","","/path"}));
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testDirectoryCpWithoutP() throws Exception {
  run(new Cp(), "d1", "d4");
  // Without -p, cp must preserve neither modification time nor permissions.
  // Fix: added failure messages and replaced assertTrue(!x.equals(y)) with
  // assertFalse, which reads and reports better.
  assertTrue("cp without -p should not preserve modification time", fs.getFileStatus(new Path("d1")).getModificationTime() != fs.getFileStatus(new Path("d4")).getModificationTime());
  assertFalse("cp without -p should not preserve permissions", fs.getFileStatus(new Path("d1")).getPermission().equals(fs.getFileStatus(new Path("d4")).getPermission()));
}
InternalCallVerifier BooleanVerifier
@Test public void processOptionsAll(){
  // Fix: parameterize the raw LinkedList (Count.processOptions consumes
  // string arguments); avoids unchecked-usage warnings.
  LinkedList<String> options = new LinkedList<String>();
  options.add("-q");
  options.add("-h");
  options.add("dummy");
  Count count = new Count();
  count.processOptions(options);
  // Both -q (show quotas) and -h (human readable) must be recognised.
  assertTrue(count.isShowQuotas());
  assertTrue(count.isHumanReadable());
}
InternalCallVerifier BooleanVerifier
@Test public void processOptionsHumanReadable(){
  // Fix: parameterize the raw LinkedList (Count.processOptions consumes
  // string arguments); avoids unchecked-usage warnings.
  LinkedList<String> options = new LinkedList<String>();
  options.add("-h");
  options.add("dummy");
  Count count = new Count();
  count.processOptions(options);
  // -h alone enables human-readable output without enabling quota display.
  assertFalse(count.isShowQuotas());
  assertTrue(count.isHumanReadable());
}
InternalCallVerifier BooleanVerifier
// Moving a file into a directory that already contains a same-named file
// must surface a PathExistsException, even with overwrite enabled, when no
// explicit rename target is given.
@Test public void testMoveTargetExistsWithoutExplicitRename() throws Exception {
Path srcPath=new Path("mockfs:/file");
Path targetPath=new Path("mockfs:/fold0");
Path dupPath=new Path("mockfs:/fold0/file");
// Fully-qualified twins of the three paths, as the mock FS resolves them.
Path srcPath2=new Path("mockfs://user/file");
Path targetPath2=new Path("mockfs://user/fold0");
Path dupPath2=new Path("mockfs://user/fold0/file");
InstrumentedRenameCommand cmd;
String[] cmdargs=new String[]{"mockfs:/file","mockfs:/fold0"};
FileStatus src_fileStat, target_fileStat, dup_fileStat;
URI myuri;
src_fileStat=mock(FileStatus.class);
target_fileStat=mock(FileStatus.class);
dup_fileStat=mock(FileStatus.class);
myuri=new URI("mockfs://user");
// Shape of the tree: source is a file, target is a directory, and the
// would-be destination (target/file) already exists as a file.
when(src_fileStat.isDirectory()).thenReturn(false);
when(target_fileStat.isDirectory()).thenReturn(true);
when(dup_fileStat.isDirectory()).thenReturn(false);
when(src_fileStat.getPath()).thenReturn(srcPath2);
when(target_fileStat.getPath()).thenReturn(targetPath2);
when(dup_fileStat.getPath()).thenReturn(dupPath2);
// Answer getFileStatus for both the short and fully-qualified forms.
when(mockFs.getFileStatus(eq(srcPath))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath))).thenReturn(dup_fileStat);
when(mockFs.getFileStatus(eq(srcPath2))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath2))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath2))).thenReturn(dup_fileStat);
when(mockFs.getUri()).thenReturn(myuri);
cmd=new InstrumentedRenameCommand();
cmd.setConf(conf);
// Even with overwrite on, the duplicate destination must be rejected.
cmd.setOverwrite(true);
cmd.run(cmdargs);
assertTrue("Rename should have failed with path exists exception",cmd.error instanceof PathExistsException);
}
InternalCallVerifier BooleanVerifier
@Test public void testRemoteExceptionUnwrap() throws Exception {
  // Verifies that a RemoteException wrapping PathIOException unwraps to the
  // correct concrete type, both with and without the expected-class hint.
  // Fix: removed the local `pe`, which was assigned twice and never read
  // (dead stores); the constructed PathIOExceptions were discarded.
  RemoteException re;
  IOException ie;
  re = new RemoteException(PathIOException.class.getName(), "test constructor1");
  ie = re.unwrapRemoteException();
  assertTrue(ie instanceof PathIOException);
  ie = re.unwrapRemoteException(PathIOException.class);
  assertTrue(ie instanceof PathIOException);
  re = new RemoteException(PathIOException.class.getName(), "test constructor2");
  ie = re.unwrapRemoteException();
  assertTrue(ie instanceof PathIOException);
  ie = re.unwrapRemoteException(PathIOException.class);
  assertTrue(ie instanceof PathIOException);
}
BooleanVerifier
// Each malformed getfattr invocation must fail AND print the expected
// diagnostic; errContent is reset between cases to isolate the output.
@Test public void testGetfattrValidations() throws Exception {
errContent.reset();
assertFalse("getfattr should fail without path",0 == runCommand(new String[]{"-getfattr","-d"}));
assertTrue(errContent.toString().contains(" is missing"));
errContent.reset();
assertFalse("getfattr should fail with extra argument",0 == runCommand(new String[]{"-getfattr","extra","-d","/test"}));
assertTrue(errContent.toString().contains("Too many arguments"));
errContent.reset();
assertFalse("getfattr should fail without \"-n name\" or \"-d\"",0 == runCommand(new String[]{"-getfattr","/test"}));
assertTrue(errContent.toString().contains("Must specify '-n name' or '-d' option"));
errContent.reset();
assertFalse("getfattr should fail with invalid encoding",0 == runCommand(new String[]{"-getfattr","-d","-e","aaa","/test"}));
assertTrue(errContent.toString().contains("Invalid/unsupported encoding option specified: aaa"));
}
BooleanVerifier
// Each malformed setfattr invocation must fail AND print the expected
// diagnostic; errContent is reset between cases to isolate the output.
@Test public void testSetfattrValidations() throws Exception {
errContent.reset();
assertFalse("setfattr should fail without path",0 == runCommand(new String[]{"-setfattr","-n","user.a1"}));
assertTrue(errContent.toString().contains(" is missing"));
errContent.reset();
assertFalse("setfattr should fail with extra arguments",0 == runCommand(new String[]{"-setfattr","extra","-n","user.a1","/test"}));
assertTrue(errContent.toString().contains("Too many arguments"));
errContent.reset();
assertFalse("setfattr should fail without \"-n name\" or \"-x name\"",0 == runCommand(new String[]{"-setfattr","/test"}));
assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option"));
}
InternalCallVerifier BooleanVerifier
@Test public void testBadChunks() throws Exception {
  // Writes random bytes that lack the chunk structure DataVerifier expects,
  // then checks that no chunk is accepted as valid.
  File fn = getTestFile();
  int byteAm = 10000;
  byte[] bytes = new byte[byteAm];
  rnd.nextBytes(bytes);
  // Fix: try-with-resources so the streams are closed even if write/verify
  // throws; the original leaked fout on a failed write.
  try (FileOutputStream fout = new FileOutputStream(fn)) {
    fout.write(bytes);
  }
  DataVerifier vf = new DataVerifier();
  VerifyOutput vout = new VerifyOutput(0, 0, 0, 0);
  try (DataInputStream in = new DataInputStream(new FileInputStream(fn))) {
    vout = vf.verifyFile(byteAm, in);
  } catch (Exception e) {
    // Expected: verifying garbage data may throw; the assertion below only
    // requires that no chunk was counted as matching.
  }
  assertTrue(vout.getChunksSame() == 0);
}
BooleanVerifier
@Test public void testFinder() throws Exception {
  ConfigExtractor extractor = getTestConfig(false);
  PathFinder finder = new PathFinder(extractor, rnd);
  int iterations = 10000;
  // Over many random draws the finder should produce exactly 10 distinct
  // file paths...
  Set uniqueFiles = new HashSet();
  for (int iter = 0; iter < iterations; iter++) {
    uniqueFiles.add(finder.getFile());
  }
  assertTrue(uniqueFiles.size() == 10);
  // ...and likewise exactly 10 distinct directory paths.
  Set uniqueDirs = new HashSet();
  for (int iter = 0; iter < iterations; iter++) {
    uniqueDirs.add(finder.getDirectory());
  }
  assertTrue(uniqueDirs.size() == 10);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDataWriting() throws Exception {
  // Round-trip: write a segment with DataWriter, then confirm DataVerifier
  // reads it back with no differing chunks.
  long byteAm = 100;
  File fn = getTestFile();
  DataWriter writer = new DataWriter(rnd);
  // Fix: renamed the local from `fs` (which shadowed the filesystem field
  // used elsewhere in this class) and close streams via try-with-resources
  // so they are released even when an assertion throws.
  GenerateOutput ostat;
  try (FileOutputStream fout = new FileOutputStream(fn)) {
    ostat = writer.writeSegment(byteAm, fout);
  }
  LOG.info(ostat);
  assertTrue(ostat.getBytesWritten() == byteAm);
  DataVerifier vf = new DataVerifier();
  VerifyOutput vfout;
  try (FileInputStream fin = new FileInputStream(fn)) {
    vfout = vf.verifyFile(byteAm, new DataInputStream(fin));
  }
  LOG.info(vfout);
  // Fix: JUnit assertEquals takes (expected, actual); the original had the
  // arguments swapped.
  assertEquals(byteAm, vfout.getBytesRead());
  assertTrue(vfout.getChunksDifferent() == 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMRFlow() throws Exception {
  // Run the full Slive MapReduce flow and check it exits cleanly and
  // produces its result file.
  ConfigExtractor extractor = getTestConfig(false);
  SliveTest s = new SliveTest(getBaseConfig());
  int ec = ToolRunner.run(s, getTestArgs(false));
  // Fix: assertEquals reports the actual exit code on failure, unlike
  // assertTrue(ec == 0).
  assertEquals("SliveTest should exit successfully", 0, ec);
  String resFile = extractor.getResultFile();
  File fn = new File(resFile);
  assertTrue("Result file " + fn + " should exist", fn.exists());
}
InternalCallVerifier BooleanVerifier
@Test public void testSelector() throws Exception {
  ConfigExtractor extractor = getTestConfig(false);
  RouletteSelector roulette = new RouletteSelector(rnd);
  List weighted = new LinkedList();
  // An empty weight list yields no selection.
  Operation selected = roulette.select(weighted);
  assertTrue(selected == null);
  // With a heavily weighted create op versus a near-zero-weight append op,
  // the selector is expected to pick the create op.
  CreateOp create = new CreateOp(extractor, rnd);
  weighted.add(new OperationWeight(create, 1.0d));
  AppendOp append = new AppendOp(extractor, rnd);
  weighted.add(new OperationWeight(append, 0.01d));
  selected = roulette.select(weighted);
  assertTrue(selected == create);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Seek past the buffer then read.
 * @throws Throwable problems
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte, drain the final two bytes, then the
  // next read must signal EOF with -1.
  instream.seek(SEEK_FILE_LEN - 2);
  for (int remaining = 2; remaining > 0; remaining--) {
    assertTrue("Premature EOF", instream.read() != -1);
  }
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Seeking to/past EOF may either succeed (next read returns -1) or throw
// EOFException; both outcomes are accepted. The stream must stay usable.
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndPastEndOfFileThenReseekAndRead() throws Throwable {
instream=fs.open(smallSeekFile);
try {
instream.seek(SMALL_SEEK_FILE_LEN);
assertMinusOne("read after seeking past EOF",instream.read());
}
catch ( EOFException expected) {
// Intentionally swallowed: rejecting the seek with EOFException is an
// equally valid filesystem behavior.
}
// Regardless of the outcome above, a re-seek and read must still work.
instream.seek(1);
assertTrue("Premature EOF",instream.read() != -1);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A positioned bulk read must not move the stream position: after
 * reading 256 bytes at offset 128, getPos() must still report 40000
 * and the next sequential read must return the byte at 40000.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  Path testSeekFile=new Path(testPath,"bigseekfile.txt");
  byte[] block=SwiftTestUtils.dataset(65536,0,255);
  createFile(testSeekFile,block);
  instream=fs.open(testSeekFile);
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  assertEquals(40000,instream.getPos());
  byte[] readBuffer=new byte[256];
  // readFully instead of read(): a positioned read may legally return
  // fewer bytes than requested, which would make the byte-compare loop flaky
  instream.readFully(128,readBuffer,0,readBuffer.length);
  // position must be unchanged by the positioned read
  assertEquals(40000,instream.getPos());
  assertEquals("@40000",block[40000],(byte)instream.read());
  // the buffer must hold bytes 128..383 of the dataset
  for (int i=0; i < 256; i++) {
    assertEquals("@" + i,block[i + 128],readBuffer[i]);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Small-file variant: seek to two bytes before EOF, read both
 * remaining bytes, then verify the next read reports EOF.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
instream=fs.open(smallSeekFile);
// a freshly opened stream starts at position 0
assertEquals(0,instream.getPos());
instream.seek(SMALL_SEEK_FILE_LEN - 2);
assertTrue("Premature EOF",instream.read() != -1);
assertTrue("Premature EOF",instream.read() != -1);
// past the end: must return -1
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifier BooleanVerifier
/**
 * Setting the location-aware option to "false" must propagate through
 * to the constructed REST client.
 */
@Test public void testLocationAwareFalsePropagates() throws Exception {
  final Configuration conf=createCoreConfig();
  set(conf,DOT_LOCATION_AWARE,"false");
  SwiftRestClient client=mkInstance(conf);
  assertFalse(client.isLocationAware());
}
APIUtilityVerifier BooleanVerifier
/**
 * Setting the location-aware option to "true" must propagate through
 * to the constructed REST client.
 */
@Test public void testLocationAwareTruePropagates() throws Exception {
  final Configuration conf=createCoreConfig();
  set(conf,DOT_LOCATION_AWARE,"true");
  SwiftRestClient client=mkInstance(conf);
  assertTrue(client.isLocationAware());
}
BooleanVerifier
/**
 * Recursively deleting /test must also remove a file in a subdirectory.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRecursiveDelete() throws Throwable {
  Path child=new Path("/test/testRecursiveDelete");
  String body="Testing a put and get to a file in a subdir " + System.currentTimeMillis();
  writeTextFile(fs,child,body,false);
  // deleting the parent recursively must take the child with it
  assertDeleted(new Path("/test"),true);
  assertFalse("child entry still present " + child,fs.exists(child));
}
BooleanVerifier
/**
 * Deleting a path that was never created must return false, not throw.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDeleteNonexistentFile() throws Throwable {
  Path missing=new Path("/test/DeleteNonexistentFile");
  assertFalse("delete returned true",fs.delete(missing,false));
}
BooleanVerifier NullVerifier HybridVerifier
/**
 * getFileBlockLocations with a length past EOF is legal when the
 * origin offset is valid; a non-empty location array must come back.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testLocateOutOfRangeLen() throws Throwable {
  describe("overshooting the length is legal, as long as the" + " origin location is valid");
  FileStatus status=createFileAndGetStatus();
  BlockLocation[] blocks=getFs().getFileBlockLocations(status,0,data.length + 100);
  assertNotNull(blocks);
  assertTrue(blocks.length > 0);
}
BooleanVerifier
/**
 * The filesystem-wide default block size must be non-zero.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDefaultBlocksizeNonZero() throws Throwable {
  long blocksize=getFs().getDefaultBlockSize();
  assertTrue("Zero default blocksize",0L != blocksize);
}
BooleanVerifier
/**
 * The default block size queried for the root path must be non-zero.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDefaultBlocksizeRootPathNonZero() throws Throwable {
  long blocksize=getFs().getDefaultBlockSize(new Path("/"));
  assertTrue("Zero default blocksize",0L != blocksize);
}
BooleanVerifier
/**
 * The default block size queried for a non-root path must be non-zero.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDefaultBlocksizeOtherPathNonZero() throws Throwable {
  long blocksize=getFs().getDefaultBlockSize(new Path("/test"));
  assertTrue("Zero default blocksize",0L != blocksize);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * The block size and replication reported for a small written file
 * must both be non-zero.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testBlocksizeNonZeroForFile() throws Throwable {
Path smallfile=new Path("/test/smallfile");
SwiftTestUtils.writeTextFile(fs,smallfile,"blocksize",true);
// NOTE(review): createFile() immediately overwrites the text file written
// above -- looks redundant; confirm whether both writes are intended.
createFile(smallfile);
FileStatus status=getFs().getFileStatus(smallfile);
assertTrue("Zero blocksize in " + status,status.getBlockSize() != 0L);
assertTrue("Zero replication in " + status,status.getReplication() != 0L);
}
BooleanVerifier
/**
 * Recursive deletion of the root directory: the call is expected to
 * return true and remove entries underneath, while the root directory
 * itself must survive. (Despite the method name, the delete itself is
 * asserted to succeed -- only the root entry is preserved.)
 * @throws Throwable
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRmRootDirRecursiveIsForbidden() throws Throwable {
Path root=path("/");
Path testFile=path("/test");
createFile(testFile);
// rm -rf / must report success...
assertTrue("rm(/) returned false",fs.delete(root,true));
// ...but the root itself must still exist, with its children gone
assertExists("Root dir is missing",root);
assertPathDoesNotExist("test file not deleted",testFile);
}
BooleanVerifier
/**
 * Deleting a non-empty file twice: the first delete succeeds and the
 * second must return false. The cycle is repeated to catch stale state
 * left behind by the first create/delete round.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDeleteNonEmptyFileTwice() throws IOException {
final Path file=new Path("/test/testDeleteNonEmptyFileTwice");
createFile(file);
assertDeleted(file,true);
// second delete of the now-missing file must report false
assertFalse("Delete returned true",fs.delete(file,false));
createFile(file);
assertDeleted(file,true);
assertFalse("Delete returned true",fs.delete(file,false));
}
BooleanVerifier
/**
 * Deleting an empty (zero byte) file twice: the first delete succeeds
 * and the second must return false; repeated to catch stale state.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDeleteEmptyFileTwice() throws IOException {
final Path file=new Path("/test/testDeleteEmptyFileTwice");
createEmptyFile(file);
assertDeleted(file,true);
SwiftTestUtils.noteAction("multiple creates, and deletes");
// second delete of the now-missing file must report false
assertFalse("Delete returned true",fs.delete(file,false));
createEmptyFile(file);
assertDeleted(file,true);
assertFalse("Delete returned true",fs.delete(file,false));
}
APIUtilityVerifier BooleanVerifier
/**
 * Asserts that a multi-byte file has a status of file and not
 * directory or symlink. (The original javadoc said "zero byte file",
 * which described a sibling test, not this one.)
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testMultiByteFilesAreFiles() throws Exception {
  Path src=path("/test/testMultiByteFilesAreFiles");
  SwiftTestUtils.writeTextFile(fs,src,"testMultiByteFilesAreFiles",false);
  assertIsFile(src);
  FileStatus status=fs.getFileStatus(src);
  // isDirectory() replaces the deprecated isDir()
  assertFalse(status.isDirectory());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test that a dir off root has a listStatus() call that
 * works as expected, and that the listing changes when a child is added.
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
Path test=path("/test");
fs.delete(test,true);
mkdirs(test);
assertExists("created test directory",test);
// a freshly created directory must list as empty
FileStatus[] statuses=fs.listStatus(test);
String statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length);
Path src=path("/test/file");
SwiftTestUtils.touch(fs,src);
// after adding one child the listing must contain exactly one entry
statuses=fs.listStatus(test);
statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,1,statuses.length);
SwiftFileStatus stat=(SwiftFileStatus)statuses[0];
// NOTE(review): the entry is a file created via touch(), yet isDir() is
// asserted true -- presumably Swift treats zero-byte objects as
// directories; confirm against SwiftFileStatus.isDir().
assertTrue("isDir(): Not a directory: " + stat,stat.isDir());
extraStatusAssertions(stat);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Assert that a filesystem is case sensitive.
 * This is done by creating a mixed-case filename and asserting that
 * its lower case version is not there; both files are then created
 * and must coexist with independent contents.
 * @throws Exception failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testFilesystemIsCaseSensitive() throws Exception {
String mixedCaseFilename="/test/UPPER.TXT";
Path upper=path(mixedCaseFilename);
Path lower=path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
// neither variant may exist before the test starts
assertFalse("File exists" + upper,fs.exists(upper));
assertFalse("File exists" + lower,fs.exists(lower));
FSDataOutputStream out=fs.create(upper);
out.writeUTF("UPPER");
out.close();
FileStatus upperStatus=fs.getFileStatus(upper);
// creating the upper-case file must not create the lower-case one
assertExists("Original upper case file" + upper,upper);
assertPathDoesNotExist("lower case file",lower);
out=fs.create(lower);
out.writeUTF("l");
out.close();
// both files must now coexist
assertExists("lower case file",lower);
assertExists("Original upper case file " + upper,upper);
// writing the lower-case file must not have touched the upper-case one
FileStatus newStatus=fs.getFileStatus(upper);
assertEquals("Expected status:" + upperStatus + " actual status "+ newStatus,upperStatus.getLen(),newStatus.getLen());
}
BooleanVerifier
/**
 * listStatus with an accept-everything filter must include a freshly
 * created child of the root directory; the full listing is captured
 * for the failure message.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListStatusFiltered() throws Throwable {
  Path dir=path("/");
  Path child=path("/test");
  touch(fs,child);
  FileStatus[] stats=fs.listStatus(dir,new AcceptAllFilter());
  StringBuilder listing=new StringBuilder();
  boolean found=false;
  for ( FileStatus entry : stats) {
    listing.append(entry.toString()).append('\n');
    found=found || entry.getPath().equals(child);
  }
  assertTrue("Path " + child + " not found in directory "+ dir+ ":"+ listing,found);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * tests functionality for big files ( > 5Gb) upload.
 * Writes a dataset in two slices, checking after each write and after
 * close() that the expected number of partitions was uploaded, then
 * reads the object back and validates contents and block locations.
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testFilePartUpload() throws Throwable {
  final Path path=new Path("/test/testFilePartUpload");
  int len=8192;
  final byte[] src=SwiftTestUtils.dataset(len,32,144);
  FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
  try {
    // (removed unused local totalPartitionsToWrite)
    assertPartitionsWritten("Startup",out,0);
    int firstWriteLen=2048;
    out.write(src,0,firstWriteLen);
    long expected=getExpectedPartitionsWritten(firstWriteLen,PART_SIZE_BYTES,false);
    SwiftUtils.debug(LOG,"First write: predict %d partitions written",expected);
    assertPartitionsWritten("First write completed",out,expected);
    int remainder=len - firstWriteLen;
    SwiftUtils.debug(LOG,"remainder: writing: %d bytes",remainder);
    out.write(src,firstWriteLen,remainder);
    expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,false);
    assertPartitionsWritten("Remaining data",out,expected);
    // closing flushes the final (possibly short) partition
    out.close();
    expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
    assertPartitionsWritten("Stream closed",out,expected);
    Header[] headers=fs.getStore().getObjectHeaders(path,true);
    for ( Header header : headers) {
      LOG.info(header.toString());
    }
    // round-trip: read the object back and compare byte for byte
    byte[] dest=readDataset(fs,path,len);
    LOG.info("Read dataset from " + path + ": data length ="+ len);
    SwiftTestUtils.compareByteArrays(src,dest,len);
    FileStatus status;
    final Path qualifiedPath=path.makeQualified(fs);
    status=fs.getFileStatus(qualifiedPath);
    BlockLocation[] locations=fs.getFileBlockLocations(status,0,len);
    assertNotNull("Null getFileBlockLocations()",locations);
    assertTrue("empty array returned for getFileBlockLocations()",locations.length > 0);
    try {
      validatePathLen(path,len);
    }
    catch ( AssertionError e) {
      // length validation can fail on some stores: downgrade to an assumption
      throw new AssumptionViolatedException(e,null);
    }
  }
  finally {
    IOUtils.closeStream(out);
  }
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * tests functionality for big files ( > 5Gb) upload, without the
 * final path-length validation of the sibling test. Writes a dataset
 * in two slices, checking partition counts after each write and after
 * close(), then reads it back and validates contents and locations.
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {
  final Path path=new Path("/test/testFilePartUploadLengthCheck");
  int len=8192;
  final byte[] src=SwiftTestUtils.dataset(len,32,144);
  FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
  try {
    // (removed unused local totalPartitionsToWrite)
    assertPartitionsWritten("Startup",out,0);
    int firstWriteLen=2048;
    out.write(src,0,firstWriteLen);
    long expected=getExpectedPartitionsWritten(firstWriteLen,PART_SIZE_BYTES,false);
    SwiftUtils.debug(LOG,"First write: predict %d partitions written",expected);
    assertPartitionsWritten("First write completed",out,expected);
    int remainder=len - firstWriteLen;
    SwiftUtils.debug(LOG,"remainder: writing: %d bytes",remainder);
    out.write(src,firstWriteLen,remainder);
    expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,false);
    assertPartitionsWritten("Remaining data",out,expected);
    // closing flushes the final (possibly short) partition
    out.close();
    expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
    assertPartitionsWritten("Stream closed",out,expected);
    Header[] headers=fs.getStore().getObjectHeaders(path,true);
    for ( Header header : headers) {
      LOG.info(header.toString());
    }
    // round-trip: read the object back and compare byte for byte
    byte[] dest=readDataset(fs,path,len);
    LOG.info("Read dataset from " + path + ": data length ="+ len);
    SwiftTestUtils.compareByteArrays(src,dest,len);
    FileStatus status=fs.getFileStatus(path);
    BlockLocation[] locations=fs.getFileBlockLocations(status,0,len);
    assertNotNull("Null getFileBlockLocations()",locations);
    assertTrue("empty array returned for getFileBlockLocations()",locations.length > 0);
  }
  finally {
    IOUtils.closeStream(out);
  }
}
BooleanVerifier
/**
 * Renaming a directory into an existing parent must succeed.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameDirectory() throws Exception {
  assumeRenameSupported();
  final Path src=new Path("/test/data/logs");
  final Path dst=new Path("/test/var/logs");
  fs.mkdirs(src);
  // the destination parent must exist before the rename
  fs.mkdirs(dst.getParent());
  assertTrue(fs.exists(src));
  rename(src,dst,true,false,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename a file and verify the destination holds the original bytes.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
  assumeRenameSupported();
  final Path src=new Path("/test/alice/file");
  final Path dst=new Path("/test/bob/file");
  fs.mkdirs(dst.getParent());
  final FSDataOutputStream out=fs.create(src);
  final byte[] message="Some data".getBytes();
  out.write(message);
  out.close();
  assertTrue(fs.exists(src));
  rename(src,dst,true,false,true);
  // read back from the destination and compare content
  final FSDataInputStream in=fs.open(dst);
  final byte[] readBytes=new byte[512];
  final int count=in.read(readBytes);
  in.close();
  final byte[] actual=new byte[count];
  System.arraycopy(readBytes,0,actual,0,count);
  assertEquals(new String(message),new String(actual));
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Rename a file into a directory: the file must end up as a child
 * of the destination directory.
 * @throws Exception
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFileIntoExistingDirectory() throws Exception {
assumeRenameSupported();
Path src=path("/test/olddir/file");
createFile(src);
Path dst=path("/test/new/newdir");
fs.mkdirs(dst);
rename(src,dst,true,false,true);
Path newFile=path("/test/new/newdir/file");
// on failure, log directory listings to help diagnose where the file landed
if (!fs.exists(newFile)) {
String ls=ls(dst);
LOG.info(ls(path("/test/new")));
LOG.info(ls(path("/test/hadoop")));
fail("did not find " + newFile + " - directory: "+ ls);
}
assertTrue("Destination changed",fs.exists(path("/test/new/newdir/file")));
}
BooleanVerifier
/**
 * An empty object path inside a container counts as the root directory.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRootDirProbeEmptyPath() throws Throwable {
  SwiftObjectPath root=new SwiftObjectPath("container","");
  assertTrue(SwiftUtils.isRootDir(root));
}
BooleanVerifier
/**
 * A "/" object path inside a container counts as the root directory.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRootDirProbeRootPath() throws Throwable {
  SwiftObjectPath root=new SwiftObjectPath("container","/");
  assertTrue(SwiftUtils.isRootDir(root));
}
InternalCallVerifier BooleanVerifier
/**
 * mkdir and delete through the chrooted filesystem: each operation is
 * verified both through fSys (chrooted view) and fSysTarget (the
 * underlying filesystem, with the chroot prefix applied).
 */
@Test public void testMkdirDelete() throws IOException {
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirX"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirX/dirY"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
// delete the inner directory first, then the outer one
Assert.assertTrue(fSys.delete(new Path("/dirX/dirY"),false));
Assert.assertFalse(fSys.exists(new Path("/dirX/dirY")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX/dirY")));
Assert.assertTrue(fSys.delete(new Path("/dirX"),false));
Assert.assertFalse(fSys.exists(new Path("/dirX")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX")));
}
InternalCallVerifier BooleanVerifier
/**
 * Rename of a file and of a directory through the chrooted filesystem,
 * verified via both fSys and the chrooted target fSysTarget.
 */
@Test public void testRename() throws IOException {
fileSystemTestHelper.createFile(fSys,"/newDir/foo");
// NOTE(review): the boolean result of rename() is ignored here; the
// follow-up exists/isFile asserts are what verify the outcome
fSys.rename(new Path("/newDir/foo"),new Path("/newDir/fooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
// same pattern for a directory rename
fSys.mkdirs(new Path("/newDir/dirFoo"));
fSys.rename(new Path("/newDir/dirFoo"),new Path("/newDir/dirFooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
Assert.assertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Working-directory semantics on the chrooted filesystem: absolute and
 * relative setWorkingDirectory calls, "." and ".." handling, relative
 * create/open/mkdirs resolution, and switching to a local-FS URI.
 * The statement order is significant: each assertion depends on the
 * working directory established just before it.
 */
@Test public void testWorkingDirectory() throws Exception {
fSys.mkdirs(new Path("/testWd"));
Path workDir=new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// "." must leave the working directory unchanged
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// ".." must move to the parent
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
workDir=new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// a relative path must resolve against the current working directory
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
absoluteDir=new Path("/test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
// relative create/open/mkdirs must resolve against the working directory
Path absoluteFooPath=new Path(absoluteDir,"foo");
fSys.create(absoluteFooPath).close();
fSys.open(new Path("foo")).close();
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir,"newDir")));
// switching to a fully-qualified local-FS directory must also work
final String LOCAL_FS_ROOT_URI="file:///tmp/test";
absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
}
InternalCallVerifier BooleanVerifier
/**
 * Test modify operations (create, mkdir, delete, etc)
 * Verify the operation via chrootedfs (ie fSys) and *also* via the
 * target file system (ie fSysTarget) that has been chrooted.
 */
@Test public void testCreateDelete() throws IOException {
// file directly under the chroot root
fileSystemTestHelper.createFile(fSys,"/foo");
Assert.assertTrue(fSys.isFile(new Path("/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"foo")));
// file one level down, parent created implicitly
fileSystemTestHelper.createFile(fSys,"/newDir/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fSys.delete(new Path("/newDir/foo"),false));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
// file two levels down
fileSystemTestHelper.createFile(fSys,"/newDir/newDir2/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
Assert.assertTrue(fSys.delete(new Path("/newDir/newDir2/foo"),false));
Assert.assertFalse(fSys.exists(new Path("/newDir/newDir2/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/newDir2/foo")));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * listStatus through the chrooted filesystem: the root status maps to
 * the chroot target, an empty root lists zero entries, and after
 * creating two files and two top-level directories the listing holds
 * exactly four entries with the expected file/directory kinds.
 */
@Test public void testList() throws IOException {
FileStatus fs=fSys.getFileStatus(new Path("/"));
Assert.assertTrue(fs.isDirectory());
// the chrooted root resolves to the chroot target path
Assert.assertEquals(fs.getPath(),chrootedTo);
FileStatus[] dirPaths=fSys.listStatus(new Path("/"));
Assert.assertEquals(0,dirPaths.length);
fileSystemTestHelper.createFile(fSys,"/foo");
fileSystemTestHelper.createFile(fSys,"/bar");
fSys.mkdirs(new Path("/dirX"));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirY"));
// dirXX is nested and must NOT appear in the root listing
fSys.mkdirs(new Path("/dirX/dirXX"));
dirPaths=fSys.listStatus(new Path("/"));
Assert.assertEquals(4,dirPaths.length);
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"foo"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"bar"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirX"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirY"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
}
InternalCallVerifier BooleanVerifier
/**
 * Rename of a file and of a directory through the chrooted FileContext,
 * verified via both fc and the chrooted target fcTarget.
 */
@Test public void testRename() throws IOException {
fileContextTestHelper.createFile(fc,"/newDir/foo");
fc.rename(new Path("/newDir/foo"),new Path("/newDir/fooBar"));
Assert.assertFalse(exists(fc,new Path("/newDir/foo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(isFile(fc,fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/fooBar")));
// same pattern for a directory rename
fc.mkdir(new Path("/newDir/dirFoo"),FileContext.DEFAULT_PERM,false);
fc.rename(new Path("/newDir/dirFoo"),new Path("/newDir/dirFooBar"));
Assert.assertFalse(exists(fc,new Path("/newDir/dirFoo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/dirFoo")));
Assert.assertTrue(isDir(fc,fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"newDir/dirFooBar")));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * listStatus through the chrooted FileContext: the root status maps to
 * the chroot target, an empty root lists zero entries, and after
 * creating two files and two top-level directories the listing holds
 * exactly four entries with the expected file/directory kinds.
 */
@Test public void testList() throws IOException {
FileStatus fs=fc.getFileStatus(new Path("/"));
Assert.assertTrue(fs.isDirectory());
// the chrooted root resolves to the chroot target path
Assert.assertEquals(fs.getPath(),chrootedTo);
FileStatus[] dirPaths=fc.util().listStatus(new Path("/"));
Assert.assertEquals(0,dirPaths.length);
fileContextTestHelper.createFileNonRecursive(fc,"/foo");
fileContextTestHelper.createFileNonRecursive(fc,"/bar");
fc.mkdir(new Path("/dirX"),FileContext.DEFAULT_PERM,false);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirY"),FileContext.DEFAULT_PERM,false);
// dirXX is nested and must NOT appear in the root listing
fc.mkdir(new Path("/dirX/dirXX"),FileContext.DEFAULT_PERM,false);
dirPaths=fc.util().listStatus(new Path("/"));
Assert.assertEquals(4,dirPaths.length);
fs=fileContextTestHelper.containsPath(fcTarget,"foo",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=fileContextTestHelper.containsPath(fcTarget,"bar",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=fileContextTestHelper.containsPath(fcTarget,"dirX",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
fs=fileContextTestHelper.containsPath(fcTarget,"dirY",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Working-directory semantics on the chrooted FileContext: absolute and
 * relative setWorkingDirectory calls, "." and ".." handling, relative
 * create/open/mkdir resolution, rejection of a nonexistent directory,
 * and switching to a local-FS URI. Statement order is significant:
 * each assertion depends on the working directory set just before it.
 */
@Test public void testWorkingDirectory() throws Exception {
fc.mkdir(new Path("/testWd"),FileContext.DEFAULT_PERM,false);
Path workDir=new Path("/testWd");
// FileContext reports fully-qualified working directories
Path fqWd=fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
// "." must leave the working directory unchanged
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
// ".." must move to the parent
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(fqWd.getParent(),fc.getWorkingDirectory());
workDir=new Path("/testWd");
fqWd=fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
// a relative path must resolve against the current working directory
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
Path fqAbsoluteDir=fc.makeQualified(absoluteDir);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
absoluteDir=new Path("/test/existingDir2");
fqAbsoluteDir=fc.makeQualified(absoluteDir);
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
// relative create/open/mkdir must resolve against the working directory
Path absolutePath=new Path(absoluteDir,"foo");
fc.create(absolutePath,EnumSet.of(CreateFlag.CREATE)).close();
fc.open(new Path("foo")).close();
fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
// cd into a nonexistent directory must fail
absoluteDir=fileContextTestHelper.getTestRootPath(fc,"nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
}
catch ( Exception e) {
// expected: any failure exception is acceptable here
}
// switching to a fully-qualified local-FS directory must also work
final String LOCAL_FS_ROOT_URI="file:///tmp/test";
absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
}
InternalCallVerifier BooleanVerifier
/**
 * If the base filesystem accepts a name, ChRootedFs must report it
 * valid, and must have consulted the base with the chroot-prefixed path.
 */
@Test public void testIsValidNameValidInBaseFs() throws Exception {
  AbstractFileSystem base=Mockito.spy(fc.getDefaultFileSystem());
  ChRootedFs chroot=new ChRootedFs(base,new Path("/chroot"));
  Mockito.doReturn(true).when(base).isValidName(Mockito.anyString());
  Assert.assertTrue(chroot.isValidName("/test"));
  Mockito.verify(base).isValidName("/chroot/test");
}
InternalCallVerifier BooleanVerifier
/**
 * mkdir and delete through the chrooted FileContext: each operation is
 * verified both through fc (chrooted view) and fcTarget (the underlying
 * context, with the chroot prefix applied).
 */
@Test public void testMkdirDelete() throws IOException {
fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirX"),FileContext.DEFAULT_PERM,false);
Assert.assertTrue(isDir(fc,new Path("/dirX")));
Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"dirX")));
fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirX/dirY"),FileContext.DEFAULT_PERM,false);
Assert.assertTrue(isDir(fc,new Path("/dirX/dirY")));
Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"dirX/dirY")));
// delete the inner directory first, then the outer one
Assert.assertTrue(fc.delete(new Path("/dirX/dirY"),false));
Assert.assertFalse(exists(fc,new Path("/dirX/dirY")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"dirX/dirY")));
Assert.assertTrue(fc.delete(new Path("/dirX"),false));
Assert.assertFalse(exists(fc,new Path("/dirX")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"dirX")));
}
InternalCallVerifier BooleanVerifier
/**
 * Test modify operations (create, mkdir, delete, etc)
 * Verify the operation via chrootedfs (ie fc) and *also* via the
 * target file system (ie fclocal) that has been chrooted.
 */
@Test public void testCreateDelete() throws IOException {
// file directly under the chroot root
fileContextTestHelper.createFileNonRecursive(fc,"/foo");
Assert.assertTrue(isFile(fc,new Path("/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"foo")));
// file one level down, parent created implicitly
fileContextTestHelper.createFile(fc,"/newDir/foo");
Assert.assertTrue(isFile(fc,new Path("/newDir/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fc.delete(new Path("/newDir/foo"),false));
Assert.assertFalse(exists(fc,new Path("/newDir/foo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/foo")));
// file two levels down
fileContextTestHelper.createFile(fc,"/newDir/newDir2/foo");
Assert.assertTrue(isFile(fc,new Path("/newDir/newDir2/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/newDir2/foo")));
Assert.assertTrue(fc.delete(new Path("/newDir/newDir2/foo"),false));
Assert.assertFalse(exists(fc,new Path("/newDir/newDir2/foo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/newDir2/foo")));
}
InternalCallVerifier BooleanVerifier
/**
 * If the base filesystem rejects a name, ChRootedFs must report it
 * invalid, and must have consulted the base with the chroot-prefixed path.
 */
@Test public void testIsValidNameInvalidInBaseFs() throws Exception {
  AbstractFileSystem base=Mockito.spy(fc.getDefaultFileSystem());
  ChRootedFs chroot=new ChRootedFs(base,new Path("/chroot"));
  Mockito.doReturn(false).when(base).isValidName(Mockito.anyString());
  Assert.assertFalse(chroot.isValidName("/test"));
  Mockito.verify(base).isValidName("/chroot/test");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The leaf filesystems report no children; the viewfs must report
 * exactly its two mounted child filesystems.
 */
@Test public void testGetChildFileSystems() throws Exception {
  assertNull(fs1.getChildFileSystems());
  assertNull(fs2.getChildFileSystems());
  // typed list instead of the original raw type
  List<FileSystem> children=Arrays.asList(viewFs.getChildFileSystems());
  assertEquals(2,children.size());
  assertTrue(children.contains(fs1));
  assertTrue(children.contains(fs2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testAddDelegationTokens() throws Exception {
Credentials creds=new Credentials();
Token> fs1Tokens[]=addTokensWithCreds(fs1,creds);
assertEquals(1,fs1Tokens.length);
assertEquals(1,creds.numberOfTokens());
Token> fs2Tokens[]=addTokensWithCreds(fs2,creds);
assertEquals(1,fs2Tokens.length);
assertEquals(2,creds.numberOfTokens());
Credentials savedCreds=creds;
creds=new Credentials();
Token> viewFsTokens[]=viewFs.addDelegationTokens("me",creds);
assertEquals(2,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
viewFsTokens=viewFs.addDelegationTokens("me",creds);
assertEquals(0,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The checksum of a file read through viewfs must equal the checksum of
 * the same file read directly from HDFS, and must differ from the
 * checksum of a different file.
 */
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
fileSystemTestHelper.createFile(fHdfs,someFile);
// a second, different file (different length) for the negative check
fileSystemTestHelper.createFile(fHdfs,fileSystemTestHelper.getTestRootPath(fHdfs,someFile + "other"),1,512);
FileChecksum viewFSCheckSum=vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
FileChecksum hdfsCheckSum=fHdfs.getFileChecksum(new Path(someFile));
FileChecksum otherHdfsFileCheckSum=fHdfs.getFileChecksum(new Path(someFile + "other"));
assertEquals("HDFS and ViewFS checksums were not the same",viewFSCheckSum,hdfsCheckSum);
assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!",viewFSCheckSum.equals(otherHdfsFileCheckSum));
}
InternalCallVerifier BooleanVerifier
/**
 * createNonRecursive through the view filesystem must create a file
 * visible as a file both via the view and via the mount target.
 */
@Test public void testCreateNonRecursive() throws IOException {
Path path=fileSystemTestHelper.getTestRootPath(fsView,"/user/foo");
fsView.createNonRecursive(path,false,1024,(short)1,1024L,null);
// getFileStatus doubles as an existence check: it throws if the file is missing
FileStatus status=fsView.getFileStatus(new Path("/user/foo"));
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
}
InternalCallVerifier BooleanVerifier
/**
 * Test modify operations (create, mkdir, delete, etc)
 * on the mount file system where the pathname references through
 * the mount points. Hence these operation will modify the target
 * file system.
 * Verify the operation via mountfs (ie fSys) and *also* via the
 * target file system (ie fSysLocal) that the mount link points-to.
 */
@Test public void testOperationsThroughMountLinks() throws IOException {
  // Create a file via a mount link and verify it in both the view and the target.
  fileSystemTestHelper.createFile(fsView, "/user/foo");
  Assert.assertTrue("Created file should be type file",
      fsView.isFile(new Path("/user/foo")));
  Assert.assertTrue("Target of created file should be type file",
      fsTarget.isFile(new Path(targetTestRoot, "user/foo")));
  // Delete via the view; the deletion must be visible on the target too.
  Assert.assertTrue("Delete should succeed",  // typo fixed: was "suceed"
      fsView.delete(new Path("/user/foo"), false));
  Assert.assertFalse("File should not exist after delete",
      fsView.exists(new Path("/user/foo")));
  Assert.assertFalse("Target File should not exist after delete",
      fsTarget.exists(new Path(targetTestRoot, "user/foo")));
  // Same create/delete cycle through a mount link under an internal dir.
  fileSystemTestHelper.createFile(fsView, "/internalDir/linkToDir2/foo");
  Assert.assertTrue("Created file should be type file",
      fsView.isFile(new Path("/internalDir/linkToDir2/foo")));
  Assert.assertTrue("Target of created file should be type file",
      fsTarget.isFile(new Path(targetTestRoot, "dir2/foo")));
  Assert.assertTrue("Delete should succeed",  // typo fixed: was "suceed"
      fsView.delete(new Path("/internalDir/linkToDir2/foo"), false));
  Assert.assertFalse("File should not exist after delete",
      fsView.exists(new Path("/internalDir/linkToDir2/foo")));
  Assert.assertFalse("Target File should not exist after delete",
      fsTarget.exists(new Path(targetTestRoot, "dir2/foo")));
  // Create through a doubly-nested mount link.
  fileSystemTestHelper.createFile(fsView, "/internalDir/internalDir2/linkToDir3/foo");
  Assert.assertTrue("Created file should be type file",
      fsView.isFile(new Path("/internalDir/internalDir2/linkToDir3/foo")));
  Assert.assertTrue("Target of created file should be type file",
      fsTarget.isFile(new Path(targetTestRoot, "dir3/foo")));
  // Recursive create: missing intermediate dirs are created on the target.
  fileSystemTestHelper.createFile(fsView, "/internalDir/linkToDir2/missingDir/miss2/foo");
  Assert.assertTrue("Created file should be type file",
      fsView.isFile(new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
  Assert.assertTrue("Target of created file should be type file",
      fsTarget.isFile(new Path(targetTestRoot, "dir2/missingDir/miss2/foo")));
  Assert.assertTrue("Delete should succeed",
      fsView.delete(new Path("/internalDir/internalDir2/linkToDir3/foo"), false));
  Assert.assertFalse("File should not exist after delete",
      fsView.exists(new Path("/internalDir/internalDir2/linkToDir3/foo")));
  Assert.assertFalse("Target File should not exist after delete",
      fsTarget.exists(new Path(targetTestRoot, "dir3/foo")));
  // mkdir through a mount link, then a nested mkdir, then delete both.
  fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
  Assert.assertTrue("New dir should be type dir",
      fsView.isDirectory(new Path("/user/dirX")));
  Assert.assertTrue("Target of new dir should be of type dir",
      fsTarget.isDirectory(new Path(targetTestRoot, "user/dirX")));
  fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
  Assert.assertTrue("New dir should be type dir",
      fsView.isDirectory(new Path("/user/dirX/dirY")));
  Assert.assertTrue("Target of new dir should be of type dir",
      fsTarget.isDirectory(new Path(targetTestRoot, "user/dirX/dirY")));
  Assert.assertTrue("Delete should succeed",
      fsView.delete(new Path("/user/dirX/dirY"), false));
  Assert.assertFalse("File should not exist after delete",
      fsView.exists(new Path("/user/dirX/dirY")));
  Assert.assertFalse("Target File should not exist after delete",
      fsTarget.exists(new Path(targetTestRoot, "user/dirX/dirY")));
  Assert.assertTrue("Delete should succeed",
      fsView.delete(new Path("/user/dirX"), false));
  Assert.assertFalse("File should not exist after delete",
      fsView.exists(new Path("/user/dirX")));
  Assert.assertFalse("Target File should not exist after delete",  // message added for consistency
      fsTarget.exists(new Path(targetTestRoot, "user/dirX")));
  // Rename a file through the mount link.
  fileSystemTestHelper.createFile(fsView, "/user/foo");
  fsView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
  Assert.assertFalse("Renamed src should not exist",
      fsView.exists(new Path("/user/foo")));
  Assert.assertFalse("Renamed src should not exist in target",
      fsTarget.exists(new Path(targetTestRoot, "user/foo")));
  Assert.assertTrue("Renamed dest should exist as file",
      fsView.isFile(fileSystemTestHelper.getTestRootPath(fsView, "/user/fooBar")));
  Assert.assertTrue("Renamed dest should exist as file in target",
      fsTarget.isFile(new Path(targetTestRoot, "user/fooBar")));
  // Rename a directory through the mount link.
  fsView.mkdirs(new Path("/user/dirFoo"));
  fsView.rename(new Path("/user/dirFoo"), new Path("/user/dirFooBar"));
  Assert.assertFalse("Renamed src should not exist",
      fsView.exists(new Path("/user/dirFoo")));
  Assert.assertFalse("Renamed src should not exist in target",
      fsTarget.exists(new Path(targetTestRoot, "user/dirFoo")));
  Assert.assertTrue("Renamed dest should exist as dir",
      fsView.isDirectory(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirFooBar")));
  Assert.assertTrue("Renamed dest should exist as dir in target",
      fsTarget.isDirectory(new Path(targetTestRoot, "user/dirFooBar")));
  // Operations through the "/targetRoot" mount of the target's root.
  fsView.mkdirs(new Path("/targetRoot/dirFoo"));
  Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
  boolean dirFooPresent = false;
  for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
    if (fileStatus.getPath().getName().equals("dirFoo")) {
      dirFooPresent = true;
    }
  }
  Assert.assertTrue(dirFooPresent);
}
InternalCallVerifier BooleanVerifier
/** The root of the view must be readable and executable for user, group and other. */
@Test public void testRootReadableExecutable() throws IOException {
  // cd to root and confirm we actually arrived there.
  Assert.assertFalse("In root before cd", fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd", fsView.getWorkingDirectory().isRoot());
  verifyRootChildren(fsView.listStatus(fsView.getWorkingDirectory()));
  // Root permissions must include r-x for every principal class.
  final FileStatus rootStatus = fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission rootPerms = rootStatus.getPermission();
  final FsAction user = rootPerms.getUserAction();
  final FsAction group = rootPerms.getGroupAction();
  final FsAction other = rootPerms.getOtherAction();
  Assert.assertTrue("User-executable permission not set!", user.implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!", user.implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!", group.implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!", group.implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!", other.implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!", other.implies(FsAction.READ));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Block locations obtained via the view must match those obtained from the
 * target file system, both on the first and on a repeated lookup.
 */
@Test public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot, "data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File", fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
  // Repeat the lookups and compare again. Bug fix: the original discarded
  // this return value, so the second compareBLs re-checked the stale viewBL.
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}
BooleanVerifier
/** Mount links and internal dirs must report the expected file types. */
@Test public void testFileStatusOnMountLink() throws IOException {
  Assert.assertTrue(fsView.getFileStatus(new Path("/")).isDirectory());
  // All of these paths resolve to directories.
  for (String dir : new String[] {"/", "/user", "/data", "/internalDir",
      "/internalDir/linkToDir2", "/internalDir/internalDir2/linkToDir3"}) {
    checkFileStatus(fsView, dir, fileType.isDir);
  }
  checkFileStatus(fsView, "/linkToAFile", fileType.isFile);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getAclStatus on an internal dir reports the current user/group, the minimal
 * ACL for r-xr-xr-x, and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(currentUser.getUserName(), aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0], aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555), aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Listing a mount target dir reflects files and dirs created through the view.
 */
@Test public void testListOnMountTargetDirs() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/data"));
  FileStatus fs;
  Assert.assertEquals(0, dirPaths.length);
  // Add a file via the view; it must show up in the listing.
  long len = fileSystemTestHelper.createFile(fsView, "/data/foo");
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(1, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created file should appear as a file", fs.isFile());  // typo fixed: was "shoudl"
  Assert.assertEquals(len, fs.getLen());
  // Add a dir via the view; the listing now has both entries.
  fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/data/dirX"));
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created file should appear as a file", fs.isFile());  // typo fixed: was "shoudl"
  fs = fileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created dir should appear as a dir", fs.isDirectory());
}
BooleanVerifier
/** mkdirs on an existing mount point is a no-op that reports success. */
@Test public void testMkdirOfMountLink() throws IOException {
  final boolean created = fsView.mkdirs(new Path("/data"));
  Assert.assertTrue("mkdir of existing mount link should succeed", created);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
  // Listing the root of the mount table.
  FileStatus[] dirPaths = fsView.listStatus(new Path("/"));
  FileStatus fs;
  verifyRootChildren(dirPaths);
  // Listing an internal dir: one nested internal dir and one mount link.
  dirPaths = fsView.listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2", dirPaths);
  Assert.assertNotNull(fs);
  // Message fixed: this entry is an internal dir, not a mount link
  // (matches the FileContext variant of this test).
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}
BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * Deleting an internal-dir mount link must be rejected with
 * AccessControlException.
 */
@Test(expected=AccessControlException.class) public void testInternalDeleteExisting2() throws IOException {
  // Precondition; message fixed to describe the actual check (the original
  // said "Delete of link to dir should succeed" on an isDirectory() assert).
  Assert.assertTrue("linkToDir2 should be a dir",
      fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
  fcView.delete(new Path("/internalDir/linkToDir2"), false);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getAclStatus on an internal dir reports the current user/group, the minimal
 * ACL for r-xr-xr-x, and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(currentUser.getUserName(), aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0], aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555), aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * File status of mount links and internal dirs: resolvable links and internal
 * dirs report the expected types; a dangling link raises FileNotFoundException.
 */
@Test public void testFileStatusOnMountLink() throws IOException {
  Assert.assertTrue("Slash should appear as dir",
      fcView.getFileStatus(new Path("/")).isDirectory());
  checkFileStatus(fcView, "/", fileType.isDir);
  checkFileStatus(fcView, "/user", fileType.isDir);
  checkFileStatus(fcView, "/data", fileType.isDir);
  checkFileStatus(fcView, "/internalDir", fileType.isDir);
  checkFileStatus(fcView, "/internalDir/linkToDir2", fileType.isDir);
  checkFileStatus(fcView, "/internalDir/internalDir2/linkToDir3", fileType.isDir);
  checkFileStatus(fcView, "/linkToAFile", fileType.isFile);
  try {
    fcView.getFileStatus(new Path("/danglingLink"));
    Assert.fail("Expected a not found exception here");  // typo fixed: was "Excepted"
  }
  catch ( FileNotFoundException e) {
    // expected: the dangling link's target does not exist
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test modify operations (create, mkdir, delete, etc)
 * on the mount file system where the pathname references through
 * the mount points. Hence these operation will modify the target
 * file system.
 * Verify the operation via mountfs (ie fc) and *also* via the
 * target file system (ie fclocal) that the mount link points-to.
 */
@Test public void testOperationsThroughMountLinks() throws IOException {
  // Create a file via a mount link and verify it in both the view and the target.
  fileContextTestHelper.createFileNonRecursive(fcView, "/user/foo");
  Assert.assertTrue("Create file should be file",
      isFile(fcView, new Path("/user/foo")));
  Assert.assertTrue("Target of created file should be type file",
      isFile(fcTarget, new Path(targetTestRoot, "user/foo")));
  Assert.assertTrue("Delete should succeed",
      fcView.delete(new Path("/user/foo"), false));
  Assert.assertFalse("File should not exist after delete",
      exists(fcView, new Path("/user/foo")));
  Assert.assertFalse("Target File should not exist after delete",
      exists(fcTarget, new Path(targetTestRoot, "user/foo")));
  // Same cycle through a mount link under an internal dir.
  fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/linkToDir2/foo");
  Assert.assertTrue("Created file should be type file",
      isFile(fcView, new Path("/internalDir/linkToDir2/foo")));
  Assert.assertTrue("Target of created file should be type file",
      isFile(fcTarget, new Path(targetTestRoot, "dir2/foo")));
  Assert.assertTrue("Delete should succeed",  // typo fixed: was "suceed"
      fcView.delete(new Path("/internalDir/linkToDir2/foo"), false));
  Assert.assertFalse("File should not exist after deletion",
      exists(fcView, new Path("/internalDir/linkToDir2/foo")));
  Assert.assertFalse("Target should not exist after deletion",
      exists(fcTarget, new Path(targetTestRoot, "dir2/foo")));
  // Create through a doubly-nested mount link.
  fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/internalDir2/linkToDir3/foo");
  Assert.assertTrue("Created file should be of type file",
      isFile(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
  Assert.assertTrue("Target of created file should also be type file",
      isFile(fcTarget, new Path(targetTestRoot, "dir3/foo")));
  // Recursive create: missing intermediate dirs are created on the target.
  fileContextTestHelper.createFile(fcView, "/internalDir/linkToDir2/missingDir/miss2/foo");
  Assert.assertTrue("Created file should be of type file",
      isFile(fcView, new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
  Assert.assertTrue("Target of created file should also be type file",
      isFile(fcTarget, new Path(targetTestRoot, "dir2/missingDir/miss2/foo")));
  Assert.assertTrue("Delete should succeed",
      fcView.delete(new Path("/internalDir/internalDir2/linkToDir3/foo"), false));
  Assert.assertFalse("Deleted File should not exist",
      exists(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
  Assert.assertFalse("Target of deleted file should not exist",
      exists(fcTarget, new Path(targetTestRoot, "dir3/foo")));
  // mkdir through a mount link, then a nested mkdir, then delete both.
  fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
      FileContext.DEFAULT_PERM, false);
  Assert.assertTrue("New dir should be type dir",
      isDir(fcView, new Path("/user/dirX")));
  Assert.assertTrue("Target of new dir should be of type dir",
      isDir(fcTarget, new Path(targetTestRoot, "user/dirX")));
  fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
      FileContext.DEFAULT_PERM, false);
  Assert.assertTrue("New dir should be type dir",
      isDir(fcView, new Path("/user/dirX/dirY")));
  Assert.assertTrue("Target of new dir should be of type dir",
      isDir(fcTarget, new Path(targetTestRoot, "user/dirX/dirY")));
  Assert.assertTrue("Delete should succeed",
      fcView.delete(new Path("/user/dirX/dirY"), false));
  Assert.assertFalse("Deleted File should not exist",
      exists(fcView, new Path("/user/dirX/dirY")));
  Assert.assertFalse("Deleted Target should not exist",
      exists(fcTarget, new Path(targetTestRoot, "user/dirX/dirY")));
  Assert.assertTrue("Delete should succeed",
      fcView.delete(new Path("/user/dirX"), false));
  Assert.assertFalse("Deleted File should not exist",
      exists(fcView, new Path("/user/dirX")));
  Assert.assertFalse("Deleted Target should not exist",
      exists(fcTarget, new Path(targetTestRoot, "user/dirX")));
  // Rename a file through the mount link.
  fileContextTestHelper.createFile(fcView, "/user/foo");
  fcView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
  Assert.assertFalse("Renamed src should not exist",
      exists(fcView, new Path("/user/foo")));
  Assert.assertFalse("Renamed src should not exist in target",  // message added for consistency
      exists(fcTarget, new Path(targetTestRoot, "user/foo")));
  Assert.assertTrue("Renamed dest should exist as file",  // message added for consistency
      isFile(fcView, fileContextTestHelper.getTestRootPath(fcView, "/user/fooBar")));
  Assert.assertTrue("Renamed dest should exist as file in target",  // message added for consistency
      isFile(fcTarget, new Path(targetTestRoot, "user/fooBar")));
  // Rename a directory through the mount link.
  fcView.mkdir(new Path("/user/dirFoo"), FileContext.DEFAULT_PERM, false);
  fcView.rename(new Path("/user/dirFoo"), new Path("/user/dirFooBar"));
  Assert.assertFalse("Renamed src should not exist",
      exists(fcView, new Path("/user/dirFoo")));
  Assert.assertFalse("Renamed src should not exist in target",
      exists(fcTarget, new Path(targetTestRoot, "user/dirFoo")));
  Assert.assertTrue("Renamed dest should exist as dir",
      isDir(fcView, fileContextTestHelper.getTestRootPath(fcView, "/user/dirFooBar")));
  Assert.assertTrue("Renamed dest should exist as dir in target",
      isDir(fcTarget, new Path(targetTestRoot, "user/dirFooBar")));
  // Operations through the "/targetRoot" mount of the target's root.
  fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
  boolean dirFooPresent = false;
  // Fix: the raw RemoteIterator would not compile against the FileStatus
  // assignment below; parameterize it.
  RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path("/targetRoot/"));
  while (dirContents.hasNext()) {
    FileStatus fileStatus = dirContents.next();
    if (fileStatus.getPath().getName().equals("dirFoo")) {
      dirFooPresent = true;
    }
  }
  Assert.assertTrue(dirFooPresent);
}
BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * Renaming an internal-dir mount link must be rejected with
 * AccessControlException.
 */
@Test(expected=AccessControlException.class) public void testInternalRename2() throws IOException {
  // Precondition check; message typo fixed ("linkTODir2" -> "linkToDir2").
  Assert.assertTrue("linkToDir2 should be a dir",
      fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
  fcView.rename(new Path("/internalDir/linkToDir2"), new Path("/internalDir/dir1"));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Read operations (list/listStatus) on the internal dirs of the mount table
 * must succeed: mount links list as symlinks, internal dirs as directories.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
  // Root of the mount table: 7 children total.
  FileStatus[] rootEntries = fcView.util().listStatus(new Path("/"));
  Assert.assertEquals(7, rootEntries.length);
  // Every mount link at the root lists as a symlink.
  for (String link : new String[] {"/user", "/data", "/danglingLink", "/linkToAFile"}) {
    FileStatus linkStatus = fileContextTestHelper.containsPath(fcView, link, rootEntries);
    Assert.assertNotNull(linkStatus);
    Assert.assertTrue("A mount should appear as symlink", linkStatus.isSymlink());
  }
  // The internal dir lists as a plain directory.
  FileStatus entry = fileContextTestHelper.containsPath(fcView, "/internalDir", rootEntries);
  Assert.assertNotNull(entry);
  Assert.assertTrue("InternalDirs should appear as dir", entry.isDirectory());
  // Inside the internal dir: one nested internal dir and one mount link.
  FileStatus[] innerEntries = fcView.util().listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, innerEntries.length);
  entry = fileContextTestHelper.containsPath(fcView, "/internalDir/internalDir2", innerEntries);
  Assert.assertNotNull(entry);
  Assert.assertTrue("InternalDirs should appear as dir", entry.isDirectory());
  entry = fileContextTestHelper.containsPath(fcView, "/internalDir/linkToDir2", innerEntries);
  Assert.assertNotNull(entry);
  Assert.assertTrue("A mount should appear as symlink", entry.isSymlink());
}
TestCleaner BranchVerifier BooleanVerifier HybridVerifier
// Release server-side resources acquired in setUp. Order matters: clients and
// clusters are torn down before the server they talk to is stopped.
@After public void tearDown() throws Exception {
LOG.info("tearDown starting");
tearDownAll();
stopServer();
// Release the lock on the port-number file so other test JVMs can claim the port.
portNumLockFile.close();
portNumFile.delete();
// Remove the per-test scratch dir; assert so a failed delete (leaked files) is
// surfaced as a test failure rather than silently ignored.
if (tmpDir != null) {
Assert.assertTrue("delete " + tmpDir.toString(),recursiveDelete(tmpDir));
}
serverFactory=null;
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that, when the callback fails to enter active state,
 * the elector rejoins the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
// No back-off sleep has happened yet.
Assert.assertEquals(0,elector.sleptFor);
// Make the app callback fail when the elector tries to go active.
Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive();
// Deliver a successful lock-creation callback, which triggers becomeActive().
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockApp).becomeActive();
// A second create() call proves the elector re-joined the election.
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The rejoin must have been preceded by a back-off sleep.
Assert.assertTrue(elector.sleptFor > 0);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that, when the callback fails to enter active state, after
 * a ZK disconnect (i.e from the StatCallback), that the elector rejoins
 * the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
// No back-off sleep has happened yet.
Assert.assertEquals(0,elector.sleptFor);
// Simulate connection loss on the create callback; the elector retries the
// create (hence times(2) below).
elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
// The retry reports the node already exists, so the elector stats it.
elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
verifyExistCall(1);
// The lock node is owned by our own session (ephemeral owner == session id)...
Stat stat=new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
// ...but becoming active fails.
Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat);
Mockito.verify(mockApp,Mockito.times(1)).becomeActive();
// A third create() call proves the elector rejoined after the failure.
Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The rejoin must have been preceded by a back-off sleep.
Assert.assertTrue(elector.sleptFor > 0);
}
BooleanVerifier
/**
 * the test creates 2 electors which try to become active using a real
 * zookeeper server. It verifies that 1 becomes active and 1 becomes standby.
 * Upon becoming active the leader quits election and the test verifies that
 * the standby now becomes active.
 */
@Test(timeout=20000) public void testActiveStandbyTransition() throws Exception {
LOG.info("starting test with parentDir:" + PARENT_DIR);
// Parent znode does not exist until the first elector creates it.
assertFalse(electors[0].parentZNodeExists());
electors[0].ensureParentZNode();
assertTrue(electors[0].parentZNodeExists());
// Elector 0 joins first and becomes active.
electors[0].joinElection(appDatas[0]);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,zkServer,PARENT_DIR,appDatas[0]);
Mockito.verify(cbs[0],Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
// Elector 1 joins second and becomes standby.
electors[1].joinElection(appDatas[1]);
Mockito.verify(cbs[1],Mockito.timeout(1000)).becomeStandby();
checkFatalsAndReset();
// Active quits the election; the standby takes over.
electors[0].quitElection(true);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,zkServer,PARENT_DIR,appDatas[1]);
Mockito.verify(cbs[1],Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
// Former active rejoins and becomes standby behind elector 1.
electors[0].joinElection(appDatas[0]);
Mockito.verify(cbs[0],Mockito.timeout(1000)).becomeStandby();
checkFatalsAndReset();
// Kill the active's ZK session (without letting it reconnect): elector 0
// should fence the old active and take over.
electors[1].preventSessionReestablishmentForTests();
try {
zkServer.closeSession(electors[1].getZKSessionIdForTests());
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,zkServer,PARENT_DIR,appDatas[0]);
Mockito.verify(cbs[1],Mockito.timeout(1000)).enterNeutralMode();
Mockito.verify(cbs[0],Mockito.timeout(1000)).fenceOldActive(AdditionalMatchers.aryEq(appDatas[1]));
Mockito.verify(cbs[0],Mockito.timeout(1000)).becomeActive();
}
finally {
electors[1].allowSessionReestablishmentForTests();
}
// Once allowed to reconnect, elector 1 settles as standby.
Mockito.verify(cbs[1],Mockito.timeout(5000)).becomeStandby();
checkFatalsAndReset();
// Repeat the session-kill scenario in the other direction.
electors[0].preventSessionReestablishmentForTests();
try {
zkServer.closeSession(electors[0].getZKSessionIdForTests());
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,zkServer,PARENT_DIR,appDatas[1]);
Mockito.verify(cbs[0],Mockito.timeout(1000)).enterNeutralMode();
Mockito.verify(cbs[1],Mockito.timeout(1000)).fenceOldActive(AdditionalMatchers.aryEq(appDatas[0]));
Mockito.verify(cbs[1],Mockito.timeout(1000)).becomeActive();
}
finally {
electors[0].allowSessionReestablishmentForTests();
}
checkFatalsAndReset();
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Failover must fail when getServiceStatus() is denied on both services. */
@Test public void testFailoverWithoutPermission() throws Exception {
  // Both endpoints reject status queries, as if the caller lacked permission.
  DummyHAService activeSvc = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  Mockito.doThrow(new AccessControlException("Access denied")).when(activeSvc.proxy).getServiceStatus();
  DummyHAService standbySvc = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new AccessControlException("Access denied")).when(standbySvc.proxy).getServiceStatus();
  activeSvc.fencer = standbySvc.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(activeSvc, standbySvc, false, false);
    fail("Can't failover when access is denied");
  }
  catch ( FailoverFailedException ffe) {
    // The underlying access failure must be surfaced in the cause.
    assertTrue(ffe.getCause().getMessage().contains("Access denied"));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the proper state is propagated when the health monitor
 * sees an uncaught exception in its thread.
 */
@Test(timeout=15000) public void testHealthMonitorDies() throws Exception {
LOG.info("Mocking RTE in health monitor, waiting for FAILED");
// Force an OOME on proxy creation and make the service unreachable, so the
// monitor thread dies with an uncaught throwable.
throwOOMEOnCreate=true;
svc.actUnreachable=true;
waitForState(hm,HealthMonitor.State.HEALTH_MONITOR_FAILED);
// After the failure the monitor thread must terminate cleanly.
hm.shutdown();
hm.join();
assertFalse(hm.isAlive());
}
InternalCallVerifier BooleanVerifier
// Walk the health monitor through its state transitions:
// unhealthy -> healthy -> not-responding (with proxy re-creation) -> healthy.
@Test(timeout=15000) public void testMonitor() throws Exception {
LOG.info("Mocking bad health check, waiting for UNHEALTHY");
svc.isHealthy=false;
waitForState(hm,HealthMonitor.State.SERVICE_UNHEALTHY);
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.isHealthy=true;
waitForState(hm,HealthMonitor.State.SERVICE_HEALTHY);
LOG.info("Returning an IOException, as if node went down");
// Remember the proxy-creation count so we can observe reconnect attempts.
int countBefore=createProxyCount.get();
svc.actUnreachable=true;
waitForState(hm,HealthMonitor.State.SERVICE_NOT_RESPONDING);
// While unreachable, the monitor keeps re-creating the proxy; wait for at
// least 3 new attempts (poll loop, 10ms granularity).
while (createProxyCount.get() < countBefore + 3) {
Thread.sleep(10);
}
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.actUnreachable=false;
waitForState(hm,HealthMonitor.State.SERVICE_HEALTHY);
// Shut down and make sure the monitor thread exits.
hm.shutdown();
hm.join();
assertFalse(hm.isAlive());
}
BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/** Blank lines, leading whitespace and '#' comments in the fencer config must be tolerated. */
@Test public void testWhitespaceAndCommentsInConfig() throws BadFencingConfigurationException {
  final String fencerConf = "\n"
      + " # the next one will always fail\n"
      + " " + AlwaysFailFencer.class.getName() + "(foo) # <- fails\n"
      + AlwaysSucceedFencer.class.getName() + "(bar) \n";
  NodeFencer nodeFencer = setupFencer(fencerConf);
  // Overall fencing succeeds because the second method succeeds.
  assertTrue(nodeFencer.fence(MOCK_TARGET));
  // Both configured methods ran exactly once, against the same target,
  // with their respective arguments.
  assertEquals(1, AlwaysFailFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysFailFencer.fencedSvc);
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  assertEquals("foo", AlwaysFailFencer.callArgs.get(0));
  assertEquals("bar", AlwaysSucceedFencer.callArgs.get(0));
}
BooleanVerifier EqualityVerifier HybridVerifier
/** With two configured fencers, the first success short-circuits the list. */
@Test public void testMultipleFencers() throws BadFencingConfigurationException {
  final String fencerConf = AlwaysSucceedFencer.class.getName() + "(foo)\n"
      + AlwaysSucceedFencer.class.getName() + "(bar)\n";
  NodeFencer nodeFencer = setupFencer(fencerConf);
  assertTrue(nodeFencer.fence(MOCK_TARGET));
  // Only the first fencer ran, with its own argument.
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}
BooleanVerifier
/** The "sshfence" short name parses; fencing then fails (no reachable target). */
@Test public void testShortNameSsh() throws BadFencingConfigurationException {
  assertFalse(setupFencer("sshfence").fence(MOCK_TARGET));
}
BooleanVerifier
/** "sshfence(user)" parses with a user argument; fencing then fails. */
@Test public void testShortNameSshWithUser() throws BadFencingConfigurationException {
  assertFalse(setupFencer("sshfence(user)").fence(MOCK_TARGET));
}
BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/** A fencer configured without an argument list records a null argument. */
@Test public void testArglessFencer() throws BadFencingConfigurationException {
  NodeFencer fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  assertTrue(fencer.fence(MOCK_TARGET));
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  // assertNull states the intent more clearly than assertEquals(null, ...).
  assertNull(AlwaysSucceedFencer.callArgs.get(0));
}
BooleanVerifier
/** The "shell(...)" short name works with a command that exits 0. */
@Test public void testShortNameShell() throws BadFencingConfigurationException {
  assertTrue(setupFencer(getFencerTrueCommand()).fence(MOCK_TARGET));
}
BooleanVerifier
/** "sshfence(:123)" parses with a port-only argument; fencing then fails. */
@Test public void testShortNameSshWithPort() throws BadFencingConfigurationException {
  assertFalse(setupFencer("sshfence(:123)").fence(MOCK_TARGET));
}
BooleanVerifier
/** "sshfence(user:123)" parses with user and port; fencing then fails. */
@Test public void testShortNameSshWithUserPort() throws BadFencingConfigurationException {
  assertFalse(setupFencer("sshfence(user:123)").fence(MOCK_TARGET));
}
BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/** A single configured fencer runs once with its parenthesized argument. */
@Test public void testSingleFencer() throws BadFencingConfigurationException {
  NodeFencer nodeFencer = setupFencer(AlwaysSucceedFencer.class.getName() + "(foo)");
  assertTrue(nodeFencer.fence(MOCK_TARGET));
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}
BooleanVerifier
/**
 * Test that lines on stdout get passed as INFO
 * level messages
 */
@Test public void testStdoutLogging(){
  final boolean fenced = fencer.tryFence(TEST_TARGET, "echo hello");
  assertTrue(fenced);
  // The subprocess's stdout line must have been forwarded to LOG.info.
  Mockito.verify(ShellCommandFencer.LOG).info(Mockito.endsWith("echo hello: hello"));
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the exit code of the script determines
 * whether the fencer succeeded or failed
 */
@Test public void testBasicSuccessFailure(){
  final boolean zeroExit = fencer.tryFence(TEST_TARGET, "echo");
  final boolean nonZeroExit = fencer.tryFence(TEST_TARGET, "exit 1");
  final boolean missingCommand = fencer.tryFence(TEST_TARGET, "xxxxxxxxxxxx");
  assertTrue(zeroExit);
  assertFalse(nonZeroExit);
  assertFalse(missingCommand);
}
UtilityVerifier BooleanVerifier HybridVerifier
/** "shell()" with empty parens must be rejected as unparseable. */
@Test public void testCheckParensNoArgs(){
  final Configuration conf = new Configuration();
  try {
    new NodeFencer(conf, "shell()");
    fail("Didn't throw when passing no args to shell");
  }
  catch ( BadFencingConfigurationException confe) {
    assertTrue("Unexpected exception:" + StringUtils.stringifyException(confe),
        confe.getMessage().contains("Unable to parse line: 'shell()'"));
  }
}
BooleanVerifier
/**
 * Test that lines on stderr get passed as
 * WARN level log messages
 */
@Test public void testStderrLogging(){
  final boolean fenced = fencer.tryFence(TEST_TARGET, "echo hello>&2");
  assertTrue(fenced);
  // The subprocess's stderr line must have been forwarded to LOG.warn.
  Mockito.verify(ShellCommandFencer.LOG).warn(Mockito.endsWith("echo hello>&2: hello"));
}
UtilityVerifier BooleanVerifier HybridVerifier
/** Bare "shell" with no argument list must be rejected. */
@Test public void testCheckNoArgs(){
  final Configuration conf = new Configuration();
  try {
    new NodeFencer(conf, "shell");
    fail("Didn't throw when passing no args to shell");
  }
  catch ( BadFencingConfigurationException confe) {
    assertTrue("Unexpected exception:" + StringUtils.stringifyException(confe),
        confe.getMessage().contains("No argument passed"));
  }
}
BooleanVerifier
/**
 * Test that we properly close off our input to the subprocess
 * such that it knows there's no tty connected. This is important
 * so that, if we use 'ssh', it won't try to prompt for a password
 * and block forever, for example.
 */
@Test(timeout=10000) public void testSubprocessInputIsClosed(){
  // "read" returns non-zero immediately when stdin is closed, so this must
  // fail rather than hang waiting for input.
  final boolean fenced = fencer.tryFence(TEST_TARGET, "read");
  assertFalse(fenced);
}
InternalCallVerifier BooleanVerifier
/**
 * Test connecting to a host which definitely won't respond.
 * Make sure that it times out and returns false, but doesn't throw
 * any exception
 */
@Test(timeout=20000) public void testConnectTimeout() throws BadFencingConfigurationException {
  // Use a short connect timeout so the test completes well within its budget.
  final Configuration timeoutConf = new Configuration();
  timeoutConf.setInt(SshFenceByTcpPort.CONF_CONNECT_TIMEOUT_KEY, 3000);
  final SshFenceByTcpPort sshFence = new SshFenceByTcpPort();
  sshFence.setConf(timeoutConf);
  assertFalse(sshFence.tryFence(UNFENCEABLE_TARGET, ""));
}
InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/** End-to-end ssh fence against a configured target (skipped when not configured). */
@Test(timeout=20000) public void testFence() throws BadFencingConfigurationException {
  // Requires external ssh configuration; skip the test otherwise.
  Assume.assumeTrue(isConfigured());
  final Configuration sshConf = new Configuration();
  sshConf.set(SshFenceByTcpPort.CONF_IDENTITIES_KEY, TEST_KEYFILE);
  final SshFenceByTcpPort sshFence = new SshFenceByTcpPort();
  sshFence.setConf(sshConf);
  assertTrue(sshFence.tryFence(TEST_TARGET, null));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the ZKFC can gracefully cede its active status.
 */
@Test(timeout=15000) public void testCedeActive() throws Exception {
try {
cluster.start();
DummyZKFC zkfc=cluster.getZkfc(0);
// ZKFC 0 starts out active.
assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests());
ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000);
long st=Time.now();
// Ask it to cede active status for 3 seconds.
proxy.cedeActive(3000);
long et=Time.now();
// The RPC itself must return promptly, not block for the cede period.
assertTrue("RPC to cedeActive took " + (et - st) + " ms",et - st < 1000);
// Immediately after ceding, the elector has quit the election entirely.
assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests());
cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY);
long et2=Time.now();
// Rejoining (as standby) must have waited out roughly the full cede period.
assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) + "ms before rejoining.",et2 - et > 2800);
}
finally {
cluster.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Test shutting down the ShortCircuitCache while there are things in it.
 *
 * Fix: cluster shutdown and the temporary socket directory cleanup now
 * run in a finally block (the socket directory was previously never
 * closed at all, and an assertion failure leaked the mini-cluster).
 */
@Test public void testShortCircuitCacheShutdown() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf =
      createShortCircuitConf("testShortCircuitCacheShutdown", sockDir);
  conf.set(DFS_CLIENT_CONTEXT, "testShortCircuitCacheShutdown");
  Configuration serverConf = new Configuration(conf);
  // Disable TCP fallback so reads must use the short-circuit path.
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    // Create a file and read it back so the cache holds live entries.
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    // Closing the cache must also close its domain-socket watcher.
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    cache.close();
    Assert.assertTrue(
        cache.getDfsClientShmManager().getDomainSocketWatcher().isClosed());
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test the case where we have a failure to complete a short circuit read
 * that occurs, and then later on, we have a success.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs); however, the failure result should not be cached. We want
 * to be able to retry later and succeed.
 */
@Test(timeout=60000) public void testShortCircuitCacheTemporaryFailure() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// While true, the injected creator below simulates a slow, failed replica load.
final AtomicBoolean replicaCreationShouldFail=new AtomicBoolean(true);
// Set by reader threads on any unexpected error; checked at the end.
final AtomicBoolean testFailed=new AtomicBoolean(false);
// Disable TCP fallback; otherwise a failed short-circuit load would be masked.
DFSInputStream.tcpReadsDisabledForTesting=true;
// Inject a replica creator: while failure mode is on, sleep briefly and return
// an empty ShortCircuitReplicaInfo (a failed load). Returning null falls
// through to the normal creation path.
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
if (replicaCreationShouldFail.get()) {
Uninterruptibles.sleepUninterruptibly(2,TimeUnit.SECONDS);
return new ShortCircuitReplicaInfo();
}
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testShortCircuitCacheTemporaryFailure",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int NUM_THREADS=2;
final int SEED=0xFADED;
// Counted down once per reader after it has observed the initial failure.
final CountDownLatch gotFailureLatch=new CountDownLatch(NUM_THREADS);
// Released by the main thread once failure injection has been switched off.
final CountDownLatch shouldRetryLatch=new CountDownLatch(1);
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
LocatedBlock lblock=locatedBlocks.get(0);
BlockReader blockReader=null;
// First attempt: failure injection is on, so getBlockReader must fail
// (with TCP reads disabled, the short-circuit failure is fatal).
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
Assert.fail("expected getBlockReader to fail the first time.");
}
catch ( Throwable t) {
Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t,t.getMessage().contains("TCP reads were disabled for testing"));
}
finally {
if (blockReader != null) blockReader.close();
}
gotFailureLatch.countDown();
shouldRetryLatch.await();
// Second attempt: the earlier failure must not have been cached,
// so this load should now succeed.
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
}
catch ( Throwable t) {
LOG.error("error trying to retrieve a block reader " + "the second time.",t);
throw t;
}
finally {
if (blockReader != null) blockReader.close();
}
}
catch ( Throwable t) {
LOG.error("getBlockReader failure",t);
testFailed.set(true);
}
}
}
;
Thread threads[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
threads[i]=new Thread(readerRunnable);
threads[i].start();
}
// Wait for every reader to see the failure, then disable injection and let
// them all retry.
gotFailureLatch.await();
replicaCreationShouldFail.set(false);
shouldRetryLatch.countDown();
for (int i=0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception. This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open. If not, we should purge the replica to avoid giving
 * it out to any future readers.
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// Counts how many short-circuit replicas are created over the whole test.
final AtomicInteger replicasCreated=new AtomicInteger(0);
final AtomicBoolean testFailed=new AtomicBoolean(false);
// Disable TCP fallback so all reads use the short-circuit path.
DFSInputStream.tcpReadsDisabledForTesting=true;
// Inject a creator that only counts invocations; returning null defers to
// the normal replica-creation logic.
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
replicasCreated.incrementAndGet();
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4095;
final int SEED=0xFADE0;
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// Handshake with the reader thread: a permit is out while a read is in
// flight, so the main thread can aim its interrupts at mid-read moments.
final Semaphore sem=new Semaphore(0);
final List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
final LocatedBlock lblock=locatedBlocks.get(0);
final byte[] buf=new byte[TEST_FILE_LEN];
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
// Keep re-reading the block until an interrupt closes the channel.
while (true) {
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
sem.release();
try {
blockReader.readAll(buf,0,TEST_FILE_LEN);
}
finally {
sem.acquireUninterruptibly();
}
}
catch ( ClosedByInterruptException e) {
// Expected outcome: the main thread's interrupt closed the
// FileChannel mid-read.
LOG.info("got the expected ClosedByInterruptException",e);
sem.release();
break;
}
finally {
if (blockReader != null) blockReader.close();
}
LOG.info("read another " + TEST_FILE_LEN + " bytes.");
}
}
catch ( Throwable t) {
LOG.error("getBlockReader failure",t);
testFailed.set(true);
sem.release();
}
}
}
;
Thread thread=new Thread(readerRunnable);
thread.start();
// Repeatedly interrupt the reader while a read is in flight until one of
// the interrupts lands and ends the loop above.
while (thread.isAlive()) {
sem.acquireUninterruptibly();
thread.interrupt();
sem.release();
}
Assert.assertFalse(testFailed.get());
// The closed replica must have been purged from the cache: a fresh reader
// on the same block must succeed and see the correct contents.
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
blockReader.readFully(buf,0,TEST_FILE_LEN);
}
finally {
if (blockReader != null) blockReader.close();
}
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(buf,expected));
// Exactly two creations are expected: the purged original plus the
// replacement used for the final read.
Assert.assertEquals(2,replicasCreated.get());
dfs.close();
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * If we have a UNIX domain socket configured,
 * and we have dfs.client.domain.socket.data.traffic set to true,
 * and short-circuit access fails, we should still be able to pass
 * data traffic over the UNIX domain socket. Test this.
 *
 * Fix: cluster shutdown and socket-directory cleanup moved into a
 * finally block so a failed assertion no longer leaks them.
 */
@Test(timeout=60000) public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
  // Force the short-circuit path to fail so the client must fall back.
  DFSInputStream.tcpReadsDisabledForTesting = true;
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf =
      createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
  clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  // Server side deliberately disables short-circuit reads so the client's
  // short-circuit attempt fails and fallback is exercised.
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    // The read must still succeed (over the domain socket) and return the
    // expected file contents.
    byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 *
 * Fixes: the TemporarySocketDirectory was never closed and the cluster
 * shutdown was not in a finally block; the Visitor's raw HashMap
 * parameter is restored to its generic form.
 */
@Test public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf =
      createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  // An interval of 0 disables the shared-memory watcher on the server side.
  serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT,
        "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    // The datanode should be recorded as shm-disabled, with no shared-memory
    // segments (full or not-full) allocated for it.
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode =
        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
    cache.getDfsClientShmManager().visit(new Visitor() {
      @Override
      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
          throws IOException {
        Assert.assertEquals(1, info.size());
        PerDatanodeVisitorInfo vinfo = info.get(datanode);
        Assert.assertTrue(vinfo.disabled);
        Assert.assertEquals(0, vinfo.full.size());
        Assert.assertEquals(0, vinfo.notFull.size());
      }
    });
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout=60000) public void testMultipleWaitersOnShortCircuitCache() throws Exception {
// Held closed until the reader threads have had time to pile up on the load.
final CountDownLatch latch=new CountDownLatch(1);
// Flipped to false by the first (and only permitted) creator invocation.
final AtomicBoolean creationIsBlocked=new AtomicBoolean(true);
final AtomicBoolean testFailed=new AtomicBoolean(false);
// Disable TCP fallback so all reads use the short-circuit path.
DFSInputStream.tcpReadsDisabledForTesting=true;
// Inject a creator that blocks on the latch and fails the test if it is ever
// invoked twice; returning null defers to the normal creation path.
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
Uninterruptibles.awaitUninterruptibly(latch);
if (!creationIsBlocked.compareAndSet(true,false)) {
Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo. Only one was expected.");
}
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testMultipleWaitersOnShortCircuitCache",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADED;
final int NUM_THREADS=10;
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
byte contents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE));
// By the time any read completes, the single creation must have run.
Assert.assertFalse(creationIsBlocked.get());
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
}
catch ( Throwable e) {
LOG.error("readerRunnable error",e);
testFailed.set(true);
}
}
}
;
Thread threads[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
threads[i]=new Thread(readerRunnable);
threads[i].start();
}
// Best-effort pause so all threads reach the cache load before the latch
// opens; correctness does not depend on this exact timing.
Thread.sleep(500);
latch.countDown();
for (int i=0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 *
 * Fix: the TemporarySocketDirectory was never closed and the cluster
 * shutdown was not in a finally block.
 */
@Test public void testShortCircuitReadFromClientWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf =
      createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    // An interval of 0 disables shared memory on the CLIENT side only.
    clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    clientConf.set(DFS_CLIENT_CONTEXT,
        "testShortCircuitReadFromClientWithoutShm_clientContext");
    final DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    // With shm disabled on the client, no shm manager should exist at all.
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    Assert.assertEquals(null, cache.getDfsClientShmManager());
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that, in the case of an error, the position and limit of a ByteBuffer
 * are left unchanged. This is not mandated by ByteBufferReadable, but clients
 * of this class might immediately issue a retry on failure, so it's polite.
 *
 * Fixes: both input streams were previously leaked (the first was
 * silently overwritten, the second never closed), and the cluster
 * shutdown was not in a finally block.
 */
@Test public void testStablePositionAfterCorruptRead() throws Exception {
  final short REPL_FACTOR = 1;
  final long FILE_LENGTH = 512L;
  HdfsConfiguration conf = getConfiguration(null);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    // Corrupt every replica so the read is guaranteed to hit a ChecksumException.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    // First read: buffer at its initial position/limit.
    FSDataInputStream dis = cluster.getFileSystem().open(path);
    ByteBuffer buf = ByteBuffer.allocateDirect((int) FILE_LENGTH);
    boolean sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    } finally {
      dis.close();
    }
    assertTrue(sawException);
    assertEquals(0, buf.position());
    assertEquals(buf.capacity(), buf.limit());
    // Second read: a non-default position and limit must also survive the error.
    dis = cluster.getFileSystem().open(path);
    buf.position(3);
    buf.limit(25);
    sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    } finally {
      dis.close();
    }
    assertTrue(sawException);
    assertEquals(3, buf.position());
    assertEquals(25, buf.limit());
  } finally {
    cluster.shutdown();
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Exercises updateBlockForPipeline: a new generation stamp may only be
 * obtained for an existing, under-construction block by the lease holder.
 */
@Test public void testGetNewStamp() throws IOException {
int numDataNodes=1;
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
FileSystem fileSys=cluster.getFileSystem();
NamenodeProtocols namenode=cluster.getNameNodeRpc();
Path file=new Path("dataprotocol.dat");
DFSTestUtil.createFile(fileSys,file,1L,(short)numDataNodes,0L);
ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fileSys,file);
// Case 1: a finalized block cannot be given a new generation stamp.
try {
namenode.updateBlockForPipeline(firstBlock,"");
Assert.fail("Can not get a new GS from a finalized block");
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is not under Construction"));
}
// Case 2: a block ID the namenode has never seen must be rejected.
try {
long newBlockId=firstBlock.getBlockId() + 1;
ExtendedBlock newBlock=new ExtendedBlock(firstBlock.getBlockPoolId(),newBlockId,0,firstBlock.getGenerationStamp());
namenode.updateBlockForPipeline(newBlock,"");
Assert.fail("Cannot get a new GS from a non-existent block");
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("does not exist"));
}
DFSOutputStream out=null;
try {
// Re-open the file for append so its last block becomes under-construction.
out=(DFSOutputStream)(fileSys.append(file).getWrappedStream());
out.write(1);
out.hflush();
FSDataInputStream in=null;
try {
in=fileSys.open(file);
firstBlock=DFSTestUtil.getAllBlocks(in).get(0).getBlock();
}
finally {
IOUtils.closeStream(in);
}
DFSClient dfs=((DistributedFileSystem)fileSys).dfs;
// Case 3: a client name that does not hold the lease is rejected.
try {
namenode.updateBlockForPipeline(firstBlock,"test" + dfs.clientName);
Assert.fail("Cannot get a new GS for a non lease holder");
}
catch ( LeaseExpiredException e) {
Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// Case 4: a null client name is likewise treated as a lease mismatch.
try {
namenode.updateBlockForPipeline(firstBlock,null);
Assert.fail("Cannot get a new GS for a null lease holder");
}
catch ( LeaseExpiredException e) {
Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// Case 5: the actual lease holder can obtain a new generation stamp.
namenode.updateBlockForPipeline(firstBlock,dfs.clientName);
}
finally {
IOUtils.closeStream(out);
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verify that datanode transfer/HTTP/IPC addresses may be left unset
 * (defaulting to loopback) and then explicitly set to the 0.0.0.0
 * wildcard, with restarted datanodes picking up the new addresses.
 *
 * Fixes: cluster shutdown is now in a finally block, and the triplicated
 * stop-all/assert-address code is factored into helpers.
 */
@Test public void testDFSAddressConfig() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    // Initially the datanode binds to the loopback address.
    assertDnXferAddrContains(cluster, "/127.0.0.1:");
    stopAllDataNodes(cluster);
    // With the address keys unset, restarted DNs still bind to loopback.
    conf.unset(DFS_DATANODE_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
        null, null, null, false, true);
    assertDnXferAddrContains(cluster, "/127.0.0.1:");
    stopAllDataNodes(cluster);
    // Explicit wildcard addresses make the DN bind to 0.0.0.0.
    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
        null, null, null, false, true);
    assertDnXferAddrContains(cluster, "/0.0.0.0:");
  } finally {
    cluster.shutdown();
  }
}

/** Assert the first datanode's xfer address contains the given substring. */
private static void assertDnXferAddrContains(MiniDFSCluster cluster, String expected) {
  DataNode dn = cluster.getDataNodes().get(0);
  String selfSocketAddr = dn.getXferAddress().toString();
  System.out.println("DN Self Socket Addr == " + selfSocketAddr);
  assertTrue(selfSocketAddr.contains(expected));
}

/** Stop every running datanode, asserting each stop succeeded. */
private static void stopAllDataNodes(MiniDFSCluster cluster) {
  ArrayList<DataNode> dns = cluster.getDataNodes();
  for (int i = 0; i < dns.size(); i++) {
    DataNodeProperties dnp = cluster.stopDataNode(i);
    assertNotNull("Should have been able to stop simulated datanode", dnp);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test to verify IPFailoverProxyProvider is not requiring logical URI.
 */
@Test
public void testIPFailoverProxyProviderLogicalUri() throws Exception {
  Configuration haConfig = new HdfsConfiguration(conf);
  URI nnUri = cluster.getURI(0);
  // Register the IP-failover provider for this namenode's host.
  String providerKey =
      DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + nnUri.getHost();
  haConfig.set(providerKey, IPFailoverProxyProvider.class.getName());
  assertFalse("IPFailoverProxyProvider should not use logical URI.",
      HAUtil.useLogicalUri(haConfig, nnUri));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 *
 * Fix: assertEquals calls previously passed (actual, expected) — reversed
 * from the JUnit convention — producing misleading failure messages.
 */
@Test public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // Kill the active NN and fail over; the client must transparently retry
  // against the new active and still see the file.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // A URI that explicitly includes the default port should resolve too.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
      + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));
  fs.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 *
 * Fix: setTokenServiceUseIp mutates static state and was never restored,
 * which could affect tests running later in the same JVM.
 */
@Test public void testWrappedFailoverProxyProvider() throws Exception {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");
  SecurityUtil.setTokenServiceUseIp(false);
  try {
    assertTrue("Legacy proxy providers should use logical URI.",
        HAUtil.useLogicalUri(config, p.toUri()));
  } finally {
    // Restore the documented default (hadoop.security.token.service.use_ip
    // defaults to true) so later tests see pristine static state.
    SecurityUtil.setTokenServiceUseIp(true);
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  final String logicalHost = "misconfigured-ha-uri";
  final Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());
  final URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    // The message must name the URI the user needs to add addresses for.
    String trace = StringUtils.stringifyException(ioe);
    assertTrue("expected exception did not contain helpful message",
        trace.contains("Could not find any configured addresses for URI " + uri));
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * Test that a DFSClient waits for random time before retry on busy blocks.
 *
 * Four trials run the same busy-cluster scenario. Trials with few retries
 * are allowed to fail (logged only); trials with ample retries must pass.
 * The four copy-pasted trial blocks are factored into runBusyTrial.
 */
@Test public void testDFSClientRetriesOnBusyBlocks() throws IOException {
  System.out.println("Testing DFSClient random waiting on busy blocks.");
  final int xcievers = 2;
  final int fileLen = 6 * 1024 * 1024;
  final int threads = 50;
  runBusyTrial(1, xcievers, threads, fileLen, 300, 3, false);
  runBusyTrial(2, xcievers, threads, fileLen, 300, 50, true);
  runBusyTrial(3, xcievers, threads, fileLen, 1000, 3, false);
  runBusyTrial(4, xcievers, threads, fileLen, 1000, 50, true);
}

/**
 * Run one busyTest trial and report its outcome.
 *
 * @param testNum  trial number used in log/assert messages
 * @param xcievers max concurrent xceivers per datanode
 * @param threads  number of concurrent reader threads
 * @param fileLen  length of the test file in bytes
 * @param timeWin  retry time window in ms
 * @param retries  number of retries on busy blocks
 * @param mustPass if true, a failed trial fails the whole test
 */
private void runBusyTrial(int testNum, int xcievers, int threads, int fileLen,
    int timeWin, int retries, boolean mustPass) throws IOException {
  long start = Time.now();
  boolean pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
  double elapsedSec = (Time.now() - start) / 1000.0;
  if (mustPass) {
    // (Message text, including the historical "maxmum" typo, is preserved
    // for log compatibility.)
    assertTrue("Something wrong! Test " + testNum
        + " got Exception with maxmum retries!", pass);
    LOG.info("Test " + testNum + " succeeded! Time spent: " + elapsedSec + " sec.");
  } else if (pass) {
    LOG.info("Test " + testNum + " succeeded! Time spent: " + elapsedSec + " sec.");
  } else {
    LOG.warn("Test " + testNum + " failed, but relax. Time spent: "
        + elapsedSec + " sec.");
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The file checksum should remain computable — and identical — after one
 * of the datanodes hosting the first block is stopped.
 */
@Test
public void testGetFileChecksum() throws Exception {
  final String f = "/testGetFileChecksum";
  final Path p = new Path(f);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    // Write a 1MB file with replication factor 3.
    DFSTestUtil.createFile(fs, p, 1L << 20, (short) 3, 20100402L);
    final FileChecksum checksumBefore = fs.getFileChecksum(p);
    assertTrue(checksumBefore != null);
    // Stop the datanode serving the first replica of the first block.
    final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
        cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE).getLocatedBlocks();
    final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
    cluster.stopDataNode(first.getXferAddr());
    // The checksum computed from the remaining replicas must match.
    final FileChecksum checksumAfter = fs.getFileChecksum(p);
    assertEquals(checksumBefore, checksumAfter);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * This tests that DFSInputStream failures are counted for a given read
 * operation, and not over the lifetime of the stream. It is a regression
 * test for HDFS-127.
 */
@Test public void testFailuresArePerOperation() throws Exception {
long fileSize=4096;
Path file=new Path("/testFile");
// Shorten the retry window and socket timeout so the injected failures
// don't make the test run long.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,2 * 1000);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
// Spy on the namenode RPC so getBlockLocations failures can be injected.
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
int maxBlockAcquires=client.getMaxBlockAcquireFailures();
assertTrue(maxBlockAcquires > 0);
DFSTestUtil.createFile(fs,file,fileSize,(short)1,12345L);
// One more failure than the client tolerates: the read must give up.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
try {
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
fail("Didn't get exception");
}
catch ( IOException ioe) {
DFSClient.LOG.info("Got expected exception",ioe);
}
// Exactly the tolerated number of failures: the read must succeed.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
DFSClient.LOG.info("Starting test case for failure reset");
// Read once through maxBlockAcquires injected failures...
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
DFSInputStream is=client.open(file.toString());
byte buf[]=new byte[10];
IOUtils.readFully(is,buf,0,buf.length);
DFSClient.LOG.info("First read successful after some failures.");
// ...then inject the same number again on the SAME stream. If failures
// were counted per-stream rather than per-operation, this second read
// would exceed the limit and fail (the HDFS-127 regression).
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
is.openInfo();
is.seek(0);
IOUtils.readFully(is,buf,0,buf.length);
}
finally {
cluster.shutdown();
}
}
BooleanVerifier
/**
 * Verify that client will correctly give up after the specified number
 * of times trying to add a block.
 *
 * Fix: previously, if os.close() unexpectedly succeeded, the test passed
 * silently; it now fails explicitly in that case.
 */
@SuppressWarnings({"serial","unchecked"}) @Test public void testNotYetReplicatedErrors() throws IOException {
  final String exceptionMsg = "Nope, not replicated yet...";
  final int maxRetries = 1;  // client retries for locate-following-block
  conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
  NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
  // addBlock always throws: NotReplicatedYetException while the client
  // should still be retrying, a plain IOException once it has retried more
  // often than the configuration allows.
  Answer answer = new ThrowsException(new IOException()) {
    int retryCount = 0;
    @Override public Object answer(InvocationOnMock invocation) throws Throwable {
      retryCount++;
      System.out.println("addBlock has been called " + retryCount + " times");
      if (retryCount > maxRetries + 1)
        throw new IOException("Retried too many times: " + retryCount);
      else
        throw new RemoteException(NotReplicatedYetException.class.getName(), exceptionMsg);
    }
  };
  when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class),
      any(DatanodeInfo[].class), anyLong(), any(String[].class))).thenAnswer(answer);
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0],
      1010, 0, null)).when(mockNN).getFileInfo(anyString());
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0],
      1010, 0, null)).when(mockNN).create(anyString(), (FsPermission) anyObject(),
      anyString(), (EnumSetWritable) anyObject(), anyBoolean(), anyShort(),
      anyLong(), (List) anyList());
  final DFSClient client = new DFSClient(null, mockNN, conf, null);
  OutputStream os = client.create("testfile", true);
  os.write(20);
  try {
    os.close();
    // The mocked addBlock can never succeed, so close() must throw once the
    // retry budget is exhausted. (fail() raises AssertionError, which the
    // catch below does not swallow.)
    fail("close() should have thrown after exhausting addBlock retries");
  } catch (Exception e) {
    assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
        e.getMessage().equals(exceptionMsg));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests mkdir will not create directory when parent is missing.
 */
@Test
public void testMkdir() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    // A plain mkdir under the root should succeed.
    assertTrue(dfs.mkdir(new Path("/mkdir-" + Time.now()), FsPermission.getDefault()));
    String filePath = "/mkdir-file-" + Time.now();
    DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
    // Case 1: the "parent" exists but is a regular file.
    IOException caught = null;
    try {
      dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
    } catch (IOException e) {
      caught = e;
    }
    assertTrue("Create a directory when parent dir exists as file using"
        + " mkdir() should throw ParentNotDirectoryException ",
        caught != null && caught instanceof ParentNotDirectoryException);
    // Case 2: the parent does not exist at all.
    caught = null;
    try {
      dfs.mkdir(new Path("/non-exist/mkdir-" + Time.now()), FsPermission.getDefault());
    } catch (IOException e) {
      caught = e;
    }
    assertTrue("Create a directory in a non-exist parent dir using"
        + " mkdir() should throw FileNotFoundException ",
        caught != null && caught instanceof FileNotFoundException);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests mkdirs can create a directory that does not exist and will
 * not create a subdirectory off a file. Regression test for HADOOP-281.
 *
 * Fix: the success flag was a boxed Boolean; a primitive boolean is the
 * correct type (no nullability, no accidental autoboxing).
 */
@Test public void testDFSMkdirs() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    // Creating a fresh directory works and is idempotent.
    Path myPath = new Path("/test/mkdirs");
    assertTrue(fileSys.mkdirs(myPath));
    assertTrue(fileSys.exists(myPath));
    assertTrue(fileSys.mkdirs(myPath));
    // Creating a directory underneath an existing FILE must fail.
    Path myFile = new Path("/test/mkdirs/myFile");
    DFSTestUtil.writeFile(fileSys, myFile, "hello world");
    Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
    boolean mkdirSucceeded = true;
    try {
      fileSys.mkdirs(myIllegalPath);
    } catch (IOException e) {
      mkdirSucceeded = false;
    }
    assertFalse(mkdirSucceeded);
    assertFalse(fileSys.exists(myIllegalPath));
    fileSys.delete(myFile, true);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRemove() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    final long usedBefore = getTotalDfsUsed(cluster);
    {
      final int fileCount = 100;
      // Create a batch of files, then delete every one of them again.
      for (int i = 0; i < fileCount; i++) {
        createFile(fs, new Path(dir, "a" + i));
      }
      final long usedPeak = getTotalDfsUsed(cluster);
      for (int i = 0; i < fileCount; i++) {
        fs.delete(new Path(dir, "a" + i), false);
      }
      // Allow a few heartbeat intervals for the datanodes to report freed blocks.
      Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
      final long usedAfter = getTotalDfsUsed(cluster);
      // Usage must return to its pre-test level once all blocks are deleted.
      assertEquals("All blocks should be gone. start=" + usedBefore + " max=" + usedPeak + " final=" + usedAfter, usedBefore, usedAfter);
    }
    fs.delete(dir, true);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises FileSystem.rename under several conditions: a rename while
 * another file in the same directory is open for write, renames into
 * non-existent destinations, renames of a directory into its own subtree,
 * prefix-named destinations, and trailing-slash destination paths.
 */
@Test public void testRename() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs=cluster.getFileSystem();
assertTrue(fs.mkdirs(dir));
{
// Rename "a" -> "b" while sibling "aa" is still open for write;
// the open file's lease must survive the rename of its sibling.
Path a=new Path(dir,"a");
Path aa=new Path(dir,"aa");
Path b=new Path(dir,"b");
createFile(fs,a);
assertEquals(0,countLease(cluster));
DataOutputStream aa_out=fs.create(aa);
aa_out.writeBytes("something");
// Exactly one lease: the one held for the still-open "aa".
assertEquals(1,countLease(cluster));
list(fs,"rename0");
fs.rename(a,b);
list(fs,"rename1");
// Writing to "aa" must still work after the sibling rename.
aa_out.writeBytes(" more");
aa_out.close();
list(fs,"rename2");
// Closing "aa" releases the last lease.
assertEquals(0,countLease(cluster));
}
{
// Rename into a destination whose parent does not exist fails.
Path dstPath=new Path("/c/d");
assertFalse(fs.exists(dstPath));
assertFalse(fs.rename(dir,dstPath));
}
{
// Renaming a directory into its own subtree must fail.
Path src=new Path("/a/b");
Path dst=new Path("/a/b/c");
createFile(fs,new Path(src,"foo"));
assertFalse(fs.rename(src,dst));
assertFalse(fs.rename(src.getParent(),dst.getParent()));
}
{
// Destination name sharing a prefix with the source is a distinct path
// and the rename must succeed.
Path src=new Path("/testPrefix");
Path dst=new Path("/testPrefixfile");
createFile(fs,src);
assertTrue(fs.rename(src,dst));
}
{
// Rename onto itself succeeds; a trailing slash on a file destination
// normalizes to the same path, but "/a/b/" as destination of "/a/b"
// (dir into itself) fails.
Path src=new Path("/a/b/c");
createFile(fs,src);
assertTrue(fs.rename(src,src));
assertFalse(fs.rename(new Path("/a/b"),new Path("/a/b/")));
assertTrue(fs.rename(src,new Path("/a/b/c/")));
}
fs.delete(dir,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check the blocks of dst file are cleaned after rename with overwrite
 */
@Test(timeout=120000) public void testRenameWithOverwrite() throws Exception {
  final short replication = 2;
  final long blkSize = 512;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final long fileLen = blkSize * 3;
    final Path srcPath = new Path("/foo/src");
    final Path dstPath = new Path("/foo/dst");
    DFSTestUtil.createFile(dfs, srcPath, fileLen, replication, 1);
    DFSTestUtil.createFile(dfs, dstPath, fileLen, replication, 1);
    // Capture dst's block locations before the rename clobbers the file.
    LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), "/foo/dst", 0, fileLen);
    BlockManager blockManager = NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
    assertTrue(blockManager.getStoredBlock(blocks.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
    dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
    // The overwritten file's blocks must have been removed from the block map.
    assertTrue(blockManager.getStoredBlock(blocks.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
  }
  finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * This test attempts to rollback the NameNode and DataNode under
 * a number of valid and invalid conditions.
 */
@Test public void testRollback() throws Exception {
File[] baseDirs;
UpgradeUtilities.initialize();
StorageInfo storageInfo=null;
// Run every scenario with both a single and a doubled storage directory.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// Scenario: valid NN rollback with both "current" and "previous" present.
log("Normal NameNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
checkResult(NAME_NODE,nameNodeDirs);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario: valid DN rollback driven by the ROLLBACK startup option.
log("Normal DataNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
checkResult(DATA_NODE,dataNodeDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario: valid block-pool rollback; the previous state carries an
// older layout version, which is what rollback is supposed to restore.
log("Normal BlockPool rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"current",UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"previous",UpgradeUtilities.getCurrentBlockPoolID(cluster));
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION - 1,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
File[] dataCurrentDirs=new File[dataNodeDirs.length];
for (int i=0; i < dataNodeDirs.length; i++) {
dataCurrentDirs[i]=new File((new Path(dataNodeDirs[i] + "/current")).toString());
}
UpgradeUtilities.createDataNodeVersionFile(dataCurrentDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
assertTrue(cluster.isDataNodeUp());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario: NN rollback must fail when there is no "previous" directory.
log("NameNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
startNameNodeShouldFail("None of the storage directories contain previous fs state");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario: DN rollback with no "previous" dir is a no-op; the DN
// simply comes up against the current state.
log("DataNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.UPGRADE).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario: "previous" claims a layout version from the future
// (Integer.MIN_VALUE, since layout versions are negative and decreasing);
// the block pool must refuse to roll back to it.
log("DataNode rollback with future stored layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario: "previous" has a newer fsscTime (Long.MAX_VALUE) than
// current; rolling back would move time forwards, so it must fail.
log("DataNode rollback with newer fsscTime in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario: delete the edits files from "previous"; rollback must fail.
log("NameNode rollback with no edits file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"edits.*");
startNameNodeShouldFail("Gap in transactions");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario: delete the fsimage files from "previous"; rollback must fail.
log("NameNode rollback with no image file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"fsimage_.*");
startNameNodeShouldFail("No valid image files found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario: corrupt the VERSION file so layoutVersion is unreadable.
log("NameNode rollback with corrupt version file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
for ( File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail("file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario: "previous" carries an ancient layout version (1) that this
// software cannot roll back to.
log("NameNode rollback with old layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
storageInfo=new StorageInfo(1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Appending two local files twice via "-appendToFile" must yield a remote
 * file of exactly 2x and then 4x the input length.
 */
@Test(timeout=300000) public void testAppendToFile() throws Exception {
  final int inputFileLength = 1024 * 1024;
  File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
  testRoot.mkdirs();
  File file1 = new File(testRoot, "file1");
  File file2 = new File(testRoot, "file2");
  createLocalFileWithRandomData(inputFileLength, file1);
  createLocalFileWithRandomData(inputFileLength, file2);
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem dfs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + dfs.getUri(), dfs instanceof DistributedFileSystem);
    Path remoteFile = new Path("/remoteFile");
    FsShell shell = new FsShell();
    shell.setConf(conf);
    String[] argv = {"-appendToFile", file1.toString(), file2.toString(), remoteFile.toString()};
    // First run creates the remote file from the two local inputs.
    assertThat(ToolRunner.run(shell, argv), is(0));
    assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 2));
    // Second run appends the same two inputs again, doubling the length.
    assertThat(ToolRunner.run(shell, argv), is(0));
    assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 4));
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests "hadoop fs -get" against both a healthy and a deliberately
 * corrupted block: a plain get must fail on the corrupt replica while
 * "-get -ignoreCrc" must return the (corrupted) bytes anyway.
 */
@Test(timeout=30000) public void testGet() throws IOException {
DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
final String fname="testGet.txt";
Path root=new Path("/test/get");
final Path remotef=new Path(root,fname);
final Configuration conf=new HdfsConfiguration();
// Short retry window so the failing get does not stall the test.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
// Helper that runs "-get [options] <remote> <localN>" and asserts the
// shell's exit code; returns the fetched content on success, null otherwise.
TestGetRunner runner=new TestGetRunner(){
private int count=0;
private final FsShell shell=new FsShell(conf);
public String run( int exitcode, String... options) throws IOException {
// Each invocation downloads to a fresh local file (fname1, fname2, ...).
String dst=new File(TEST_ROOT_DIR,fname + ++count).getAbsolutePath();
String[] args=new String[options.length + 3];
args[0]="-get";
args[args.length - 2]=remotef.toString();
args[args.length - 1]=dst;
for (int i=0; i < options.length; i++) {
args[i + 1]=options[i];
}
show("args=" + Arrays.asList(args));
try {
assertEquals(exitcode,shell.run(args));
}
catch ( Exception e) {
assertTrue(StringUtils.stringifyException(e),false);
}
return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
}
}
;
File localf=createLocalFile(new File(TEST_ROOT_DIR,fname));
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
dfs=cluster.getFileSystem();
mkdir(dfs,root);
dfs.copyFromLocalFile(false,false,new Path(localf.getPath()),remotef);
String localfcontent=DFSTestUtil.readFile(localf);
// On a healthy cluster both plain get and -ignoreCrc return the content.
assertEquals(localfcontent,runner.run(0));
assertEquals(localfcontent,runner.run(0,"-ignoreCrc"));
// Take the cluster down, flip bytes in the on-disk block files, restart.
List files=getBlockFiles(cluster);
dfs.close();
cluster.shutdown();
show("files=" + files);
corrupt(files);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build();
dfs=cluster.getFileSystem();
// Plain get now fails the checksum and exits non-zero (returns null).
assertEquals(null,runner.run(1));
// -ignoreCrc still succeeds and returns the corrupted bytes:
// everything but the first character matches, and the first character
// was incremented by corrupt() — TODO confirm against the corrupt() helper.
String corruptedcontent=runner.run(0,"-ignoreCrc");
assertEquals(localfcontent.substring(1),corruptedcontent.substring(1));
assertEquals(localfcontent.charAt(0) + 1,corruptedcontent.charAt(0));
}
finally {
if (null != dfs) {
try {
dfs.close();
}
catch ( Exception e) {
}
}
if (null != cluster) {
cluster.shutdown();
}
localf.delete();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to make sure that user namespace xattrs can be set only if path has
 * access and for sticky directorries, only owner/privileged user can write.
 * Trusted namespace xattrs can be set only with privileged users.
 * As user1: Create a directory (/foo) as user1, chown it to user1 (and
 * user1's group), grant rwx to "other".
 * As user2: Set an xattr (should pass with path access).
 * As user1: Set an xattr (should pass).
 * As user2: Read the xattr (should pass). Remove the xattr (should pass with
 * path access).
 * As user1: Read the xattr (should pass). Remove the xattr (should pass).
 * As user1: Change permissions only to owner
 * As User2: Set an Xattr (Should fail set with no path access) Remove an
 * Xattr (Should fail with no path access)
 * As SuperUser: Set an Xattr with Trusted (Should pass)
 */
@Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String USER1="user1";
final String GROUP1="supergroup";
final UserGroupInformation user1=UserGroupInformation.createUserForTesting(USER1,new String[]{GROUP1});
final UserGroupInformation user2=UserGroupInformation.createUserForTesting("user2",new String[]{"mygroup2"});
final UserGroupInformation SUPERUSER=UserGroupInformation.getCurrentUser();
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
// Make user1 the owner of "/" so it can create /foo below.
fs.setOwner(new Path("/"),USER1,GROUP1);
// Capture stderr so shell error output ("Permission denied") can be asserted.
bak=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// As user1: create /foo.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-mkdir","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// As user1: grant rwx to "other" (mode 707) so user2 has path access.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","707","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// As user2: setting a user-namespace xattr succeeds via path access.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// As user1 (owner): setting the same xattr also succeeds.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// As user2: reading and then removing the xattr both succeed.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// NOTE(review): this doAs body is empty. The test description says user1
// should read and remove the xattr here — confirm whether that step was
// lost, or the block is intentionally a no-op.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
return null;
}
}
);
// As user1: restrict /foo to owner only (mode 700).
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","700","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// As user2: setting an xattr now fails with "Permission denied".
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// As user2: removing an xattr likewise fails with "Permission denied".
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// As the superuser: a trusted-namespace xattr can be set regardless.
SUPERUSER.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","trusted.a3","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
}
finally {
// Restore stderr and tear the cluster down.
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Invalid "-appendToFile" invocations must produce a non-zero exit code.
 */
@Test(timeout=300000) public void testAppendToFileBadArgs() throws Exception {
  final int inputFileLength = 1024 * 1024;
  File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
  testRoot.mkdirs();
  File file1 = new File(testRoot, "file1");
  createLocalFileWithRandomData(inputFileLength, file1);
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem dfs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + dfs.getUri(), dfs instanceof DistributedFileSystem);
    FsShell shell = new FsShell();
    shell.setConf(conf);
    // Too few arguments: no destination path was supplied.
    int res = ToolRunner.run(shell, new String[]{"-appendToFile", file1.toString()});
    assertThat(res, not(0));
    // Mixing "-" (stdin) with a regular source file is rejected.
    Path remoteFile = new Path("/remoteFile");
    res = ToolRunner.run(shell, new String[]{"-appendToFile", file1.toString(), "-", remoteFile.toString()});
    assertThat(res, not(0));
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Non-recursive delete of a non-empty directory must throw; recursive
 * delete of the same directory must succeed.
 */
@Test(timeout=30000) public void testRecursiveRm() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  try {
    fs.mkdirs(new Path(new Path("parent"), "child"));
    try {
      fs.delete(new Path("parent"), false);
      // Use fail() instead of the Java `assert` statement: assertions are
      // disabled unless the JVM runs with -ea, so `assert (false)` could
      // never actually fail the test.
      fail("Non-recursive delete of a non-empty directory should have thrown");
    }
    catch (IOException e) {
      // expected: "parent" is not empty
    }
    try {
      fs.delete(new Path("parent"), true);
    }
    catch (IOException e) {
      fail("Recursive delete of a non-empty directory should have succeeded");
    }
  }
  finally {
    try {
      fs.close();
    }
    catch (IOException e) {
      // best effort; the cluster shutdown below must still run
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests copyFromLocalFile/moveFromLocalFile: a second concurrent put to
 * the same destination must fail while the first is mid-copy, and
 * multi-source put/move must land all files (move also deletes the
 * local originals).
 */
@Test(timeout=30000) public void testPut() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
try {
// Remove stale CRC sidecar files from previous runs.
new File(TEST_ROOT_DIR,".f1.crc").delete();
new File(TEST_ROOT_DIR,".f2.crc").delete();
final File f1=createLocalFile(new File(TEST_ROOT_DIR,"f1"));
final File f2=createLocalFile(new File(TEST_ROOT_DIR,"f2"));
final Path root=mkdir(dfs,new Path("/test/put"));
final Path dst=new Path(root,"dst");
show("begin");
// Thread that tries to put f2 onto the same destination while the f1
// copy is still in progress; it must get an IOException.
final Thread copy2ndFileThread=new Thread(){
@Override public void run(){
try {
show("copy local " + f2 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f2.getPath()),dst);
}
catch ( IOException ioe) {
show("good " + StringUtils.stringifyException(ioe));
return;
}
assertTrue(false);
}
}
;
// Hook: a SecurityManager whose checkPermission fires mid-copy. The
// first time a permission check happens inside FileUtil.copyContent
// (i.e. the f1 copy is underway), start the competing thread and pause
// so the two puts genuinely overlap.
SecurityManager sm=System.getSecurityManager();
System.out.println("SecurityManager = " + sm);
System.setSecurityManager(new SecurityManager(){
private boolean firstTime=true;
@Override public void checkPermission( Permission perm){
if (firstTime) {
Thread t=Thread.currentThread();
// Ignore permission checks coming from DataNode threads.
if (!t.toString().contains("DataNode")) {
String s="" + Arrays.asList(t.getStackTrace());
if (s.contains("FileUtil.copyContent")) {
firstTime=false;
copy2ndFileThread.start();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
}
}
}
}
}
);
show("copy local " + f1 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f1.getPath()),dst);
show("done");
try {
copy2ndFileThread.join();
}
catch ( InterruptedException e) {
}
// Restore the original SecurityManager before continuing.
System.setSecurityManager(sm);
// Multi-source put: both files must appear under the destination dir.
final Path destmultiple=mkdir(dfs,new Path("/test/putmultiple"));
Path[] srcs=new Path[2];
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.copyFromLocalFile(false,false,srcs,destmultiple);
srcs[0]=new Path(destmultiple,"f1");
srcs[1]=new Path(destmultiple,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
// Multi-source move: files land remotely and the local copies are gone.
final Path destmultiple2=mkdir(dfs,new Path("/test/movemultiple"));
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.moveFromLocalFile(srcs,destmultiple2);
assertFalse(f1.exists());
assertFalse(f2.exists());
srcs[0]=new Path(destmultiple2,"f1");
srcs[1]=new Path(destmultiple2,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
f1.delete();
f2.delete();
}
finally {
try {
dfs.close();
}
catch ( Exception e) {
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs "fs -du" against a directory with two files and checks the
 * reported sizes in the captured stdout.
 */
@Test(timeout=30000) public void testDu() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem fs = cluster.getFileSystem();
  // Redirect stdout so the shell's -du output can be inspected.
  PrintStream stdoutBackup = System.out;
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  System.setOut(new PrintStream(captured));
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    Path myPath = new Path("/test/dir");
    assertTrue(fs.mkdirs(myPath));
    assertTrue(fs.exists(myPath));
    Path myFile = new Path("/test/dir/file");
    writeFile(fs, myFile);
    assertTrue(fs.exists(myFile));
    Path myFile2 = new Path("/test/dir/file2");
    writeFile(fs, myFile2);
    assertTrue(fs.exists(myFile2));
    int exitCode = -1;
    try {
      exitCode = shell.run(new String[]{"-du", "/test/dir"});
    }
    catch (Exception e) {
      System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
    }
    assertTrue(exitCode == 0);
    String output = captured.toString();
    captured.reset();
    // Expected byte counts of the two files written by writeFile —
    // presumably 22 and 23; confirm against the writeFile helper.
    assertTrue(output.contains("22"));
    assertTrue(output.contains("23"));
  }
  finally {
    System.setOut(stdoutBackup);
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that FsShell commands accept fully-qualified hdfs:// URIs that
 * point at a *different* cluster than the shell's default filesystem,
 * and that scheme-only paths ("hdfs:///...") resolve to the default.
 */
@Test(timeout=30000) public void testURIPaths() throws Exception {
Configuration srcConf=new HdfsConfiguration();
Configuration dstConf=new HdfsConfiguration();
MiniDFSCluster srcCluster=null;
MiniDFSCluster dstCluster=null;
File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri");
bak.mkdirs();
try {
// Two independent clusters; the shell is configured against srcConf,
// so any operation on dstFs must go through an explicit URI.
srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath());
dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs=srcCluster.getFileSystem();
FileSystem dstFs=dstCluster.getFileSystem();
FsShell shell=new FsShell();
shell.setConf(srcConf);
// -ls on the remote cluster via URI.
String[] argv=new String[2];
argv[0]="-ls";
argv[1]=dstFs.getUri().toString() + "/";
int ret=ToolRunner.run(shell,argv);
assertEquals("ls works on remote uri ",0,ret);
// -rmr on the remote cluster via URI.
dstFs.mkdirs(new Path("/hadoopdir"));
argv=new String[2];
argv[0]="-rmr";
argv[1]=dstFs.getUri().toString() + "/hadoopdir";
ret=ToolRunner.run(shell,argv);
assertEquals("-rmr works on remote uri " + argv[1],0,ret);
// -du on the remote cluster via URI.
argv[0]="-du";
argv[1]=dstFs.getUri().toString() + "/";
ret=ToolRunner.run(shell,argv);
assertEquals("du works on remote uri ",0,ret);
// -put a local file: both source (file: URI) and dest are explicit URIs.
File furi=new File(TEST_ROOT_DIR,"furi");
createLocalFile(furi);
argv=new String[3];
argv[0]="-put";
argv[1]=furi.toURI().toString();
argv[2]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" put is working ",0,ret);
// -cp between the two clusters.
argv[0]="-cp";
argv[1]=dstFs.getUri().toString() + "/furi";
argv[2]=srcFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cp is working ",0,ret);
assertTrue(srcFs.exists(new Path("/furi")));
// -cat on the remote cluster via URI.
argv=new String[2];
argv[0]="-cat";
argv[1]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cat is working ",0,ret);
dstFs.delete(new Path("/furi"),true);
dstFs.delete(new Path("/hadoopdir"),true);
// Recursive -chgrp/-chown through URIs, verified via confirmOwner.
String file="/tmp/chownTest";
Path path=new Path(file);
Path parent=new Path("/tmp");
Path root=new Path("/");
TestDFSShell.writeFile(dstFs,path);
runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*");
confirmOwner(null,"herbivores",dstFs,parent,path);
runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/");
confirmOwner(null,"reptiles",dstFs,root,parent,path);
// Scheme-only paths ("hdfs:///...") must resolve against the shell's
// default filesystem (the src cluster, where /furi was copied above).
argv[0]="-cat";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" default works for cat",0,ret);
argv[0]="-ls";
argv[1]="hdfs:///";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for ls ",0,ret);
argv[0]="-rmr";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for rm/rmr",0,ret);
}
finally {
if (null != srcCluster) {
srcCluster.shutdown();
}
if (null != dstCluster) {
dstCluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Tests various options of DFSShell.
*/
@Test(timeout=120000) public void testDFSShell() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
DistributedFileSystem fileSys=(DistributedFileSystem)fs;
FsShell shell=new FsShell();
shell.setConf(conf);
try {
Path myPath=new Path("/test/mkdirs");
assertTrue(fileSys.mkdirs(myPath));
assertTrue(fileSys.exists(myPath));
assertTrue(fileSys.mkdirs(myPath));
Path myFile=new Path("/test/mkdirs/myFile");
writeFile(fileSys,myFile);
assertTrue(fileSys.exists(myFile));
Path myFile2=new Path("/test/mkdirs/myFile2");
writeFile(fileSys,myFile2);
assertTrue(fileSys.exists(myFile2));
{
String[] args=new String[2];
args[0]="-rm";
args[1]="/test/mkdirs/myFile*";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertTrue(val == 0);
assertFalse(fileSys.exists(myFile));
assertFalse(fileSys.exists(myFile2));
writeFile(fileSys,myFile);
assertTrue(fileSys.exists(myFile));
writeFile(fileSys,myFile2);
assertTrue(fileSys.exists(myFile2));
}
{
String[] args=new String[3];
args[0]="-cat";
args[1]="/test/mkdirs/myFile";
args[2]="/test/mkdirs/myFile2";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run: " + StringUtils.stringifyException(e));
}
assertTrue(val == 0);
}
fileSys.delete(myFile2,true);
{
String[] args=new String[2];
args[0]="-cat";
args[1]="/test/mkdirs/myFile1";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertTrue(val != 0);
}
{
String[] args=new String[2];
args[0]="-rm";
args[1]="/test/mkdirs/myFile1";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertTrue(val != 0);
}
{
String[] args=new String[2];
args[0]="-rm";
args[1]="/test/mkdirs/myFile";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertTrue(val == 0);
}
{
String[] args;
int val;
args=new String[3];
args[0]="-test";
args[1]="-e";
args[2]="/test/mkdirs/noFileHere";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
args[1]="-z";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
args=new String[2];
args[0]="-touchz";
args[1]="/test/mkdirs/isFileHere";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
args=new String[2];
args[0]="-touchz";
args[1]="/test/mkdirs/thisDirNotExists/isFileHere";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
args=new String[3];
args[0]="-test";
args[1]="-e";
args[2]="/test/mkdirs/isFileHere";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
args[1]="-d";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
args[1]="-z";
val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
}
{
String[] args=new String[2];
args[0]="-mkdir";
args[1]="/test/dir1";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
String[] args1=new String[3];
args1[0]="-cp";
args1[1]="/test/dir1";
args1[2]="/test/dir1/dir2";
val=0;
try {
val=shell.run(args1);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
args1[0]="-cp";
args1[1]="/test/dir1";
args1[2]="/test/dir1foo";
val=-1;
try {
val=shell.run(args1);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
}
{
String[] args=new String[3];
args[0]="-test";
args[1]="-f";
args[2]="/test/mkdirs/noFileHere";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
}
{
String[] args=new String[3];
args[0]="-test";
args[1]="-f";
args[2]="/test/mkdirs";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
}
{
writeFile(fileSys,myFile);
assertTrue(fileSys.exists(myFile));
String[] args=new String[3];
args[0]="-test";
args[1]="-f";
args[2]=myFile.toString();
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
}
{
String[] args=new String[3];
args[0]="-test";
args[1]="-s";
args[2]="/test/mkdirs/noFileHere";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
}
{
String[] args=new String[3];
args[0]="-test";
args[1]="-s";
args[2]="/test/mkdirs/isFileHere";
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(1,val);
}
{
String[] args=new String[3];
args[0]="-test";
args[1]="-s";
args[2]=myFile.toString();
int val=-1;
try {
val=shell.run(args);
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
assertEquals(0,val);
}
}
finally {
try {
fileSys.close();
}
catch ( Exception e) {
}
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that setfattr/getfattr/setfattr -x honor HDFS permissions:
 * an unprivileged user gets exit code 1 plus a "Permission denied"
 * message on stderr, while the file owner (the test superuser) succeeds.
 */
@Test(timeout=30000) public void testSetXAttrPermission() throws Exception {
UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path p=new Path("/foo");
fs.mkdirs(p);
// Capture stderr so shell error output can be inspected; restored in finally.
bak=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// Owner-only access: "user" must be denied when setting an xattr.
fs.setPermission(p,new FsPermission((short)0700));
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 1",1,ret);
String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// The test runner owns /foo, so the same command succeeds for it.
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
// Loosen to 0750; the assertions below expect "user" is still denied
// for both reading (-n) and removing (-x) the attribute.
fs.setPermission(p,new FsPermission((short)0750));
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
assertEquals("Returned should be 1",1,ret);
String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"});
assertEquals("Returned should be 1",1,ret);
str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
}
finally {
// Always restore stderr and stop the mini cluster.
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a permission-denied RemoteException surfaced by the
 * NameNode is reported to the shell user as a non-zero exit code plus a
 * "Permission denied" message on stderr, not as a raw stack trace.
 */
@Test(timeout=30000) public void testRemoteException() throws Exception {
  UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting("tmpname", new String[]{"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    // Owner-only access so the test user is denied on listing.
    fs.setPermission(p, new FsPermission((short) 0700));
    bak = System.err;
    tmpUGI.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        // Redirect stderr into a buffer we can inspect.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        System.setErr(new PrintStream(out));
        int ret = ToolRunner.run(fshell, new String[]{"-ls", "/foo"});
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
  } finally {
    // Restore stderr and stop the cluster even on assertion failure.
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Copies a zero-length local file into HDFS and back out again,
 * verifying the round trip yields an empty regular file on both sides.
 */
@Test(timeout=30000) public void testZeroSizeFile() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  final DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    // Create an empty local source file.
    final File f1 = new File(TEST_ROOT_DIR, "f1");
    assertFalse(f1.exists());
    assertTrue(f1.createNewFile());
    assertTrue(f1.exists() && f1.isFile());
    assertEquals(0L, f1.length());
    // Upload it into a fresh HDFS directory.
    final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
    final Path remotef = new Path(root, "dst");
    show("copy local " + f1 + " to remote "+ remotef);
    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
    // Download it again and confirm it is still a zero-byte file.
    final File f2 = new File(TEST_ROOT_DIR, "f2");
    assertFalse(f2.exists());
    dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
    assertTrue(f2.exists() && f2.isFile());
    assertEquals(0L, f2.length());
    f1.delete();
    f2.delete();
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
      // best-effort close; cluster shutdown below is what matters
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * check command error outputs and exit statuses.
 *
 * Each shell command below is run against a missing (or otherwise
 * invalid) path; the test checks the exit code and that stderr carries
 * a friendly unix-like message rather than a raw exception dump.
 */
@Test(timeout=30000) public void testErrOutPut() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem srcFs=cluster.getFileSystem();
Path root=new Path("/nonexistentfile");
// Capture stderr; restored in the finally block.
bak=System.err;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream tmp=new PrintStream(out);
System.setErr(tmp);
// -cat on a missing file: exit 1 and no raw exception text printed.
String[] argv=new String[2];
argv[0]="-cat";
argv[1]=root.toUri().getPath();
int ret=ToolRunner.run(new FsShell(),argv);
assertEquals(" -cat returned 1 ",1,ret);
String returned=out.toString();
assertTrue("cat does not print exceptions ",(returned.lastIndexOf("Exception") == -1));
out.reset();
// -rm and -rmr on a missing file: "No such file or directory".
argv[0]="-rm";
argv[1]=root.toString();
FsShell shell=new FsShell();
shell.setConf(conf);
ret=ToolRunner.run(shell,argv);
assertEquals(" -rm returned 1 ",1,ret);
returned=out.toString();
out.reset();
assertTrue("rm prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
argv[0]="-rmr";
argv[1]=root.toString();
ret=ToolRunner.run(shell,argv);
assertEquals(" -rmr returned 1",1,ret);
returned=out.toString();
assertTrue("rmr prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
// -du and -dus on a missing file: same friendly message.
argv[0]="-du";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -du prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0]="-dus";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -dus prints reasonable error",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
// -ls on a missing file must not claim "Found 0 items".
argv[0]="-ls";
argv[1]="/nonexistenfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -ls does not return Found 0 items",(returned.lastIndexOf("Found 0") == -1));
out.reset();
// NOTE(review): the message says "-lsr" but the command run here is
// -ls; presumably a stale copy of an older assertion -- confirm intent.
argv[0]="-ls";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
assertEquals(" -lsr should fail ",1,ret);
out.reset();
// -ls of an existing empty directory prints nothing (no "Found 0").
srcFs.mkdirs(new Path("/testdir"));
argv[0]="-ls";
argv[1]="/testdir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -ls does not print out anything ",(returned.lastIndexOf("Found 0") == -1));
out.reset();
// -ls with a glob that matches nothing fails with exit 1.
argv[0]="-ls";
argv[1]="/user/nonxistant/*";
ret=ToolRunner.run(shell,argv);
assertEquals(" -ls on nonexistent glob returns 1",1,ret);
out.reset();
// -mkdir over an existing directory: exit 1 and "File exists".
argv[0]="-mkdir";
argv[1]="/testdir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -mkdir returned 1 ",1,ret);
assertTrue(" -mkdir returned File exists",(returned.lastIndexOf("File exists") != -1));
// -mkdir over an existing FILE: exit 1 and "not a directory".
Path testFile=new Path("/testfile");
OutputStream outtmp=srcFs.create(testFile);
outtmp.write(testFile.toString().getBytes());
outtmp.close();
out.reset();
argv[0]="-mkdir";
argv[1]="/testfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -mkdir returned 1",1,ret);
assertTrue(" -mkdir returned this is a file ",(returned.lastIndexOf("not a directory") != -1));
out.reset();
// -mv to a relative destination is expected to fail with exit 1
// (the assertion message reads as "the rename failed", i.e. expected).
argv=new String[3];
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="file";
ret=ToolRunner.run(shell,argv);
assertEquals("mv failed to rename",1,ret);
out.reset();
// A successful -mv is silent: no "Renamed" line is printed.
argv=new String[3];
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="/testfiletest";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue("no output from rename",(returned.lastIndexOf("Renamed") == -1));
out.reset();
// Moving the now-renamed source again yields the unix-like error.
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="/testfiletmp";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" unix like output",(returned.lastIndexOf("No such file or") != -1));
out.reset();
// -du with no path argument defaults to the home dir and succeeds.
argv=new String[1];
argv[0]="-du";
srcFs.mkdirs(srcFs.getHomeDirectory());
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" no error ",0,ret);
assertTrue("empty path specified",(returned.lastIndexOf("empty string") == -1));
out.reset();
// -test -d on a missing path: exit 1 and completely silent.
argv=new String[3];
argv[0]="-test";
argv[1]="-d";
argv[2]="/no/such/dir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -test -d wrong result ",1,ret);
assertTrue(returned.isEmpty());
}
finally {
// Restore stderr and stop the cluster even if an assertion failed.
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests -copyToLocal: a glob source copies the whole tree (including a
 * sibling directory matched by the wildcard), and a nonexistent source
 * fails with exit 1 without creating any local artifact.
 */
@Test(timeout=30000) public void testCopyToLocal() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    String root = createTree(dfs, "copyToLocal");
    // Glob copy: "<root>*" matches both copyToLocal and copyToLocal2.
    try {
      assertEquals(0, runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
    } catch (Exception e) {
      System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
    }
    File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
    File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");
    File sub = new File(localroot, "sub");
    File f1 = new File(localroot, "f1");
    File f2 = new File(localroot, "f2");
    File f3 = new File(sub, "f3");
    File f4 = new File(sub, "f4");
    File f5 = new File(localroot2, "f1");
    // Every leaf of the tree must have arrived as a regular file.
    for (File copied : new File[]{f1, f2, f3, f4, f5}) {
      assertTrue("Copying failed.", copied.isFile());
    }
    assertTrue("Copying failed.", sub.isDirectory());
    // Clean up: files first, then the (now empty) subdirectory.
    for (File copied : new File[]{f1, f2, f3, f4, f5, sub}) {
      copied.delete();
    }
    // Nonexistent source: exit 1 and nothing created locally.
    String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
    try {
      assertEquals(1, shell.run(args));
    } catch (Exception e) {
      System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
    }
    assertFalse(new File(TEST_ROOT_DIR, "nosuchfile").exists());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
      // best-effort close
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies which attributes "cp" preserves on a DIRECTORY for each
 * preserve-flag combination:
 *   -p / -ptop     : timestamps, ownership, permission (no xattrs, no ACL)
 *   -ptopx         : additionally the extended attributes
 *   -ptopa / -ptoa : additionally the ACL entries (and the ACL bit)
 *
 * Fix: the xattr-count check previously called
 * assertEquals(xattrs.size(), 2) with the arguments swapped; JUnit's
 * contract is assertEquals(expected, actual), so a failure would have
 * reported the values backwards. Corrected to assertEquals(2, ...).
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path srcDir = new Path(hdfsTestDir, "srcDir");
    fs.mkdirs(srcDir);
    // Give the source a non-trivial ACL (including a DEFAULT entry,
    // which only directories carry) and the sticky bit on its permission.
    fs.setAcl(srcDir, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    fs.setPermission(srcDir, new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    // A child file so the copied directory is non-empty.
    Path srcFile = new Path(srcDir, "srcFile");
    fs.create(srcFile).close();
    // Snapshot the attributes the preserve flags should carry over.
    FileStatus status = fs.getFileStatus(srcDir);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
    fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);
    // -p: times/owner/permission preserved; xattrs and ACL dropped.
    Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
    String[] argv = new String[]{"-cp", "-p", srcDir.toUri().toString(), targetDir1.toUri().toString()};
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", SUCCESS, ret);
    FileStatus targetStatus = fs.getFileStatus(targetDir1);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    FsPermission targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    Map xattrs = fs.getXAttrs(targetDir1);
    assertTrue(xattrs.isEmpty());
    List acls = fs.getAclStatus(targetDir1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: identical to -p, spelled out explicitly.
    Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
    argv = new String[]{"-cp", "-ptop", srcDir.toUri().toString(), targetDir2.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptop is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(targetDir2);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(targetDir2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: the two xattrs set above must be preserved as well.
    Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
    argv = new String[]{"-cp", "-ptopx", srcDir.toUri().toString(), targetDir3.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopx is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(targetDir3);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(targetDir3);
    assertEquals(2, xattrs.size());
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(targetDir3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: ACL entries and the ACL bit must be preserved; xattrs not.
    Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
    argv = new String[]{"-cp", "-ptopa", srcDir.toUri().toString(), targetDir4.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(targetDir4);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(targetDir4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
    // -ptoa: same as -ptopa without explicit "p" -- ACL still preserved.
    Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
    argv = new String[]{"-cp", "-ptoa", srcDir.toUri().toString(), targetDir5.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptoa is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(targetDir5);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(targetDir5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
  } finally {
    if (shell != null) {
      shell.close();
    }
    if (fs != null) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "cp -p" preserves times/ownership/permission (but not
 * ACL entries), while "cp -ptopa" additionally carries over the full
 * ACL and the ACL bit, on a source file with the sticky bit set.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    // Non-trivial ACL plus the sticky bit on the source file.
    fs.setAcl(src, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    fs.setPermission(src, new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    // Snapshot the attributes the preserve flags should carry over.
    FileStatus srcStatus = fs.getFileStatus(src);
    final long mtime = srcStatus.getModificationTime();
    final long atime = srcStatus.getAccessTime();
    final String owner = srcStatus.getOwner();
    final String group = srcStatus.getGroup();
    final FsPermission perm = srcStatus.getPermission();
    shell = new FsShell(conf);
    // cp -p: everything but the ACL entries should match the source.
    Path target1 = new Path(hdfsTestDir, "targetfile1");
    int exitCode = ToolRunner.run(shell, new String[]{"-cp", "-p", src.toUri().toString(), target1.toUri().toString()});
    assertEquals("cp is not working", SUCCESS, exitCode);
    FileStatus dstStatus = fs.getFileStatus(target1);
    assertEquals(mtime, dstStatus.getModificationTime());
    assertEquals(atime, dstStatus.getAccessTime());
    assertEquals(owner, dstStatus.getOwner());
    assertEquals(group, dstStatus.getGroup());
    FsPermission dstPerm = dstStatus.getPermission();
    assertTrue(perm.equals(dstPerm));
    List acls = fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(dstPerm.getAclBit());
    // cp -ptopa: the ACL (and thus the ACL bit) must be preserved too.
    Path target2 = new Path(hdfsTestDir, "targetfile2");
    exitCode = ToolRunner.run(shell, new String[]{"-cp", "-ptopa", src.toUri().toString(), target2.toUri().toString()});
    assertEquals("cp -ptopa is not working", SUCCESS, exitCode);
    dstStatus = fs.getFileStatus(target2);
    assertEquals(mtime, dstStatus.getModificationTime());
    assertEquals(atime, dstStatus.getAccessTime());
    assertEquals(owner, dstStatus.getOwner());
    assertEquals(group, dstStatus.getGroup());
    dstPerm = dstStatus.getPermission();
    assertTrue(perm.equals(dstPerm));
    acls = fs.getAclStatus(target2).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(dstPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests error behavior of getfattr: a user denied by permissions must
 * not see the xattr value in the output, and requesting a nonexistent
 * attribute must print the "attribute not found" error.
 *
 * Fix: the second assertion's message used to be a copy of the
 * value-leak message ("xattr value was incorrectly returned") even
 * though it checks for the not-found error text; the message now
 * describes what is actually being asserted.
 */
@Test(timeout=120000) public void testGetFAttrErrors() throws Exception {
  final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  MiniDFSCluster cluster = null;
  PrintStream bakErr = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/foo");
    fs.mkdirs(p);
    // Capture stderr for inspection; restored in the finally block.
    bakErr = System.err;
    final FsShell fshell = new FsShell(conf);
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));
    // Owner-only access so "user" cannot read xattrs of /foo.
    fs.setPermission(p, new FsPermission((short) 0700));
    {
      // The superuser sets the attribute the restricted user will probe.
      final int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();
    }
    user.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        // Only the output is checked, not the exit code: the point is
        // that the value "1234" must not leak to a denied user.
        int ret = ToolRunner.run(fshell, new String[]{"-getfattr", "-n", "user.a1", "/foo"});
        String str = out.toString();
        assertTrue("xattr value was incorrectly returned", str.indexOf("1234") == -1);
        out.reset();
        return null;
      }
    });
    {
      // Requesting an attribute that was never set must print the
      // dedicated "not found" error message.
      final int ret = ToolRunner.run(fshell, new String[]{"-getfattr", "-n", "user.nonexistent", "/foo"});
      String str = out.toString();
      assertTrue("expected attribute-not-found error was not printed", str.indexOf("getfattr: At least one of the attributes provided was not found") >= 0);
      out.reset();
    }
  } finally {
    if (bakErr != null) {
      System.setErr(bakErr);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies which attributes "cp" preserves on a FILE for each
 * preserve-flag combination:
 *   -p / -ptop     : timestamps, ownership, permission (no xattrs, no ACL)
 *   -ptopx         : additionally the extended attributes
 *   -ptopa / -ptoa : additionally the ACL entries (and the ACL bit)
 *
 * Fix: the xattr-count check previously called
 * assertEquals(xattrs.size(), 2) with the arguments swapped; JUnit's
 * contract is assertEquals(expected, actual), so a failure would have
 * reported the values backwards. Corrected to assertEquals(2, ...).
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    // Give the source a non-trivial ACL so preservation is observable.
    fs.setAcl(src, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    // Snapshot the attributes the preserve flags should carry over.
    FileStatus status = fs.getFileStatus(src);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(src, USER_A1, USER_A1_VALUE);
    fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);
    // -p: times/owner/permission preserved; xattrs and ACL dropped.
    Path target1 = new Path(hdfsTestDir, "targetfile1");
    String[] argv = new String[]{"-cp", "-p", src.toUri().toString(), target1.toUri().toString()};
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", SUCCESS, ret);
    FileStatus targetStatus = fs.getFileStatus(target1);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    FsPermission targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    Map xattrs = fs.getXAttrs(target1);
    assertTrue(xattrs.isEmpty());
    List acls = fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: identical to -p, spelled out explicitly.
    Path target2 = new Path(hdfsTestDir, "targetfile2");
    argv = new String[]{"-cp", "-ptop", src.toUri().toString(), target2.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptop is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(target2);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(target2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: the two xattrs set above must be preserved as well.
    Path target3 = new Path(hdfsTestDir, "targetfile3");
    argv = new String[]{"-cp", "-ptopx", src.toUri().toString(), target3.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopx is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(target3);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(target3);
    assertEquals(2, xattrs.size());
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(target3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: ACL entries and the ACL bit must be preserved; xattrs not.
    Path target4 = new Path(hdfsTestDir, "targetfile4");
    argv = new String[]{"-cp", "-ptopa", src.toUri().toString(), target4.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(target4);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(target4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
    // -ptoa: same as -ptopa without explicit "p" -- ACL still preserved.
    Path target5 = new Path(hdfsTestDir, "targetfile5");
    argv = new String[]{"-cp", "-ptoa", src.toUri().toString(), target5.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptoa is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(target5);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = fs.getXAttrs(target5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests recursive listing (lsr): the superuser can list the whole tree,
 * and a restricted user -- denied on one subdirectory -- still gets a
 * listing that continues past the denied entry (the sibling "zzz" must
 * appear in the output despite the overall non-zero exit code).
 */
@Test(timeout=30000) public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    // Superuser: full recursive listing succeeds (exit 0).
    runLsr(new FsShell(conf), root, 0);
    // Strip all permissions from one subdirectory.
    dfs.setPermission(new Path(root, "sub"), new FsPermission((short) 0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(tmpusername, new String[]{tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction() {
      @Override public String run() throws Exception {
        // Restricted user: exit 1 expected, but output is still produced.
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
*
* For each 3-tuple in the cross product
* ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
* {currentNamespaceId,incorrectNamespaceId},
* {pastFsscTime,currentFsscTime,futureFsscTime})
* 1. Startup Namenode with version file containing
* (currentLayoutVersion,currentNamespaceId,currentFsscTime)
* 2. Attempt to startup Datanode with version file containing
* this iterations version 3-tuple
*
*/
@Test(timeout=300000) public void testVersions() throws Exception {
UpgradeUtilities.initialize();
// Single storage directory per role for this test.
Configuration conf=UpgradeUtilities.initializeStorageStateConf(1,new HdfsConfiguration());
// One StorageData per 3-tuple from the cross product in the javadoc.
StorageData[] versions=initializeVersions();
UpgradeUtilities.createNameNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),"current");
// Start only the NameNode; DataNodes are started per iteration below.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
// Reference tuple: the running NameNode's actual version info.
StorageData nameNodeVersion=new StorageData(HdfsConstants.NAMENODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),UpgradeUtilities.getCurrentBlockPoolID(cluster));
log("NameNode version info",NAME_NODE,null,nameNodeVersion);
String bpid=UpgradeUtilities.getCurrentBlockPoolID(cluster);
for (int i=0; i < versions.length; i++) {
// Lay down a DataNode VERSION file for this iteration's 3-tuple.
File[] storage=UpgradeUtilities.createDataNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),"current");
log("DataNode version info",DATA_NODE,i,versions[i]);
UpgradeUtilities.createDataNodeVersionFile(storage,versions[i].storageInfo,bpid,versions[i].blockPoolId);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
catch ( Exception ignore) {
// Startup failure is expected for incompatible version tuples;
// the compatibility check below decides pass/fail.
}
// The NameNode must survive every DataNode registration attempt.
assertTrue(cluster.getNameNode() != null);
// The DataNode should be up iff its version tuple is compatible.
assertEquals(isVersionCompatible(nameNodeVersion,versions[i]),cluster.isDataNodeUp());
cluster.shutdownDataNodes();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * This test iterates over the testCases table for Datanode storage and
 * attempts to startup the DataNode normally.
 */
@Test public void testDNStorageStates() throws Exception {
String[] baseDirs;
// Repeat the table with one and then two configured storage directories.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
// Disable the periodic block scanner so it cannot interfere with recovery.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_DN_TEST_CASES; i++) {
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("DATA_NODE recovery",numDirs,i,testCase);
// The NameNode always starts from a clean "current"-only state.
createNameNodeStorageState(new boolean[]{true,true,false,false,false});
cluster=createCluster(conf);
baseDirs=createDataNodeStorageState(testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
// Completely empty storage: the DataNode formats itself and starts.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
else {
if (shouldRecover) {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkResultDataNode(baseDirs,curAfterRecover,prevAfterRecover);
}
else {
// Unrecoverable state: startup is attempted but the node must
// end up down.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
}
}
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * This test iterates over the testCases table for block pool storage and
 * attempts to startup the DataNode normally, verifying that recoverable
 * block-pool directory states are repaired and unrecoverable ones keep the
 * block-pool service down.
 */
@Test public void testBlockPoolStorageStates() throws Exception {
String[] baseDirs;
String bpid=UpgradeUtilities.getCurrentBlockPoolID(null);
// Repeat the table with one and then two configured storage directories.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
// Use the DFSConfigKeys constant rather than a raw string, for
// consistency with testDNStorageStates.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_DN_TEST_CASES; i++) {
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("BLOCK_POOL recovery",numDirs,i,testCase);
// The NameNode always starts from a clean "current"-only state.
createNameNodeStorageState(new boolean[]{true,true,false,false,false});
cluster=createCluster(conf);
baseDirs=createBlockPoolStorageState(bpid,testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
// Completely empty storage: the DataNode formats itself and starts.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
else {
if (shouldRecover) {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkResultBlockPool(baseDirs,curAfterRecover,prevAfterRecover);
}
else {
// Unrecoverable state: the block-pool service must not come up.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
}
cluster.shutdown();
}
}
}
IterativeVerifier BranchVerifier BooleanVerifier
/**
 * This test iterates over the testCases table and attempts
 * to startup the NameNode normally, verifying that recoverable storage
 * states are repaired and unrecoverable ones refuse to start.
 */
@Test public void testNNStorageStates() throws Exception {
String[] baseDirs;
// Repeat the table with one and then two configured storage directories.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_NN_TEST_CASES; i++) {
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("NAME_NODE recovery",numDirs,i,testCase);
baseDirs=createNameNodeStorageState(testCase);
if (shouldRecover) {
cluster=createCluster(conf);
checkResultNameNode(baseDirs,curAfterRecover,prevAfterRecover);
cluster.shutdown();
}
else {
try {
cluster=createCluster(conf);
// Reaching this line means the unrecoverable state was accepted.
throw new AssertionError("NameNode should have failed to start");
}
catch ( IOException expected) {
// Only a completely empty storage state yields the "not
// formatted" message; other failure messages vary by case.
// Use the local testCase alias consistently.
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[PREVIOUS_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
assertTrue(expected.getLocalizedMessage().contains("NameNode is not formatted"));
}
}
}
cluster.shutdown();
}
}
}
BooleanVerifier IgnoredMethod HybridVerifier
/** Every layout version in the 0.20.203 table must be recognized as such. */
@Ignore public void test203LayoutVersion(){
final int[] versions=Storage.LAYOUT_VERSIONS_203;
for (int idx=0; idx < versions.length; idx++) {
assertTrue(Storage.is203LayoutVersion(versions[idx]));
}
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
 * paths to test renaming on upgrade
 */
@Test public void testUpgradeFromRel2ReservedImage() throws Exception {
unpackStorage(HADOOP2_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
// Without rename pairs configured, upgrading an image that contains
// reserved path components must be refused.
try {
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
fail("Expected IllegalArgumentException for reserved paths without rename pairs");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("reserved path component in this version",e);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
try {
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/edits","/edits/.reserved","/edits/.user-snapshot","/edits/.user-snapshot/editsdir","/edits/.user-snapshot/editsdir/editscontents","/edits/.user-snapshot/editsdir/editsdir2","/image","/image/.reserved","/image/.user-snapshot","/image/.user-snapshot/imagedir","/image/.user-snapshot/imagedir/imagecontents","/image/.user-snapshot/imagedir/imagedir2","/.my-reserved","/.my-reserved/edits-touch","/.my-reserved/image-touch"};
// Verify the namespace right after upgrade (i == 0) and again after
// finalizing and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk of the filesystem collecting every path.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// JUnit convention: expected value first, actual value second.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test upgrade from a branch-1.2 image with reserved paths
 */
@Test public void testUpgradeFromRel1ReservedImage() throws Exception {
unpackStorage(HADOOP1_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
// Rename pairs allow the reserved components to be migrated on upgrade.
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.my-reserved","/.user-snapshot","/.user-snapshot/.user-snapshot","/.user-snapshot/open","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot","/user","/user/andrew","/user/andrew/.user-snapshot"};
// Verify the namespace right after upgrade (i == 0) and again after
// finalizing and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk of the filesystem collecting every path.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// JUnit convention: expected value first, actual value second.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
 * NN, verifying that the correct error message is thrown.
 */
@Test public void testFailOnPreUpgradeImage() throws IOException {
Configuration conf=new HdfsConfiguration();
File namenodeStorage=new File(TEST_ROOT_DIR,"nnimage-0.3.0");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,namenodeStorage.toString());
// Start from a clean storage tree: <root>/image/fsimage.
FileUtil.fullyDelete(namenodeStorage);
assertTrue("Make " + namenodeStorage,namenodeStorage.mkdirs());
File imageDir=new File(namenodeStorage,"image");
assertTrue("Make " + imageDir,imageDir.mkdirs());
// Write an fsimage whose header carries the ancient 0.3.0 layout bytes.
byte[] imageBytes=StringUtils.hexStringToByte("fffffffee17c0d2700000000");
File imageFile=new File(imageDir,"fsimage");
FileOutputStream imageOut=new FileOutputStream(imageFile);
try {
imageOut.write(imageBytes);
}
finally {
imageOut.close();
}
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
fail("Was able to start NN from 0.3.0 image");
}
catch ( IOException ioe) {
// Anything other than the expected "too old" complaint is a real error.
if (!ioe.toString().contains("Old layout version is 'too old'")) {
throw ioe;
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test upgrade from a 0.23.11 image with reserved paths
 */
@Test public void testUpgradeFromRel023ReservedImage() throws Exception {
unpackStorage(HADOOP023_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
// Rename pairs allow the reserved components to be migrated on upgrade.
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.user-snapshot","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot"};
// Verify the namespace right after upgrade (i == 0) and again after
// finalizing and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk of the filesystem collecting every path.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// JUnit convention: expected value first, actual value second.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that initializeGenericKeys derives fs.defaultFS from the
 * NameNode RPC address even when neither HA nor Federation is configured.
 * Regression test for HDFS-3351.
 */
@Test public void testConfModificationNoFederationOrHa(){
final HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"localhost:1234");
// Precondition: defaultFS has not been derived yet.
assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
// Neither a nameservice id nor a namenode id is configured.
NameNode.initializeGenericKeys(conf,null,null);
assertEquals("hdfs://localhost:1234",conf.get(FS_DEFAULT_NAME_KEY));
}
InternalCallVerifier BooleanVerifier
/**
 * Test constructing LocatedBlock with null cachedLocs: the getter must
 * return an empty array rather than null.
 */
@Test public void testLocatedBlockConstructorWithNullCachedLocs(){
DatanodeInfo d=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds=new DatanodeInfo[1];
ds[0]=d;
ExtendedBlock b1=new ExtendedBlock("bpid",1,1,1);
// cachedLocs argument is deliberately null.
LocatedBlock l1=new LocatedBlock(b1,ds,null,null,0,false,null);
final DatanodeInfo[] cachedLocs=l1.getCachedLocations();
// assertEquals reports the actual length on failure, unlike assertTrue.
assertEquals(0,cachedLocs.length);
}
BooleanVerifier
/** Table-driven check of DFSUtil.isValidName for valid and invalid paths. */
@Test(timeout=15000) public void testIsValidName(){
// Relative components, empty components, and colons are all rejected.
final String[] invalid={"/foo/../bar","/foo/./bar","/foo//bar","/foo/:/bar","/foo:bar"};
final String[] valid={"/","/bar/"};
for ( String p : invalid) {
assertFalse(DFSUtil.isValidName(p));
}
for ( String p : valid) {
assertTrue(DFSUtil.isValidName(p));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetNNUris() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
final String NS1_NN1_ADDR="ns1-nn1.example.com:8020";
final String NS1_NN2_ADDR="ns1-nn2.example.com:8020";
final String NS2_NN_ADDR="ns2-nn.example.com:8020";
final String NN1_ADDR="nn.example.com:8020";
final String NN1_SRVC_ADDR="nn.example.com:8021";
final String NN2_ADDR="nn2.example.com:8020";
// ns1 is an HA nameservice, ns2 has only a service RPC address, and a
// standalone NN is configured via the plain rpc-address key plus defaultFS.
conf.set(DFS_NAMESERVICES,"ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),NS1_NN1_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),NS1_NN2_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"ns2"),NS2_NN_ADDR);
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"hdfs://" + NN1_ADDR);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN2_ADDR);
// All four sources contribute a distinct URI.
Collection uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(4,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
// A non-HDFS defaultFS must be ignored.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"viewfs://vfs-name.example.com");
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// A defaultFS that duplicates an existing nameservice adds nothing.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1");
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// With no federation or HA, the service RPC address takes precedence
// over the client RPC address and defaultFS.
conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN1_ADDR);
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,NN1_ADDR);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,NN1_SRVC_ADDR);
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(1,uris.size());
assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies HA detection, RPC address maps, service address resolution and
 * nameservice URIs for a federated configuration with two HA nameservices.
 */
@Test public void testHANameNodesWithFederation() throws URISyntaxException {
HdfsConfiguration conf=new HdfsConfiguration();
final String NS1_NN1_HOST="ns1-nn1.example.com:8020";
final String NS1_NN2_HOST="ns1-nn2.example.com:8020";
final String NS2_NN1_HOST="ns2-nn1.example.com:8020";
final String NS2_NN2_HOST="ns2-nn2.example.com:8020";
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1");
// Two federated nameservices, each with an HA pair of NameNodes.
conf.set(DFS_NAMESERVICES,"ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"ns1-nn1,ns1-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns2"),"ns2-nn1,ns2-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn1"),NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn2"),NS1_NN2_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn1"),NS2_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn2"),NS2_NN2_HOST);
// Nameservice -> (namenode id -> rpc address). The type parameters were
// stripped in this file; restored here so the declaration compiles.
Map<String, Map<String, InetSocketAddress>> map=DFSUtil.getHaNnRpcAddresses(conf);
assertTrue(HAUtil.isHAEnabled(conf,"ns1"));
assertTrue(HAUtil.isHAEnabled(conf,"ns2"));
assertFalse(HAUtil.isHAEnabled(conf,"ns3"));
assertEquals(NS1_NN1_HOST,map.get("ns1").get("ns1-nn1").toString());
assertEquals(NS1_NN2_HOST,map.get("ns1").get("ns1-nn2").toString());
assertEquals(NS2_NN1_HOST,map.get("ns2").get("ns2-nn1").toString());
assertEquals(NS2_NN2_HOST,map.get("ns2").get("ns2-nn2").toString());
assertEquals(NS1_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn1"));
assertEquals(NS1_NN2_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn2"));
assertEquals(NS2_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns2","ns2-nn1"));
// assertNull is the idiomatic form of assertEquals(null, ...).
assertNull(DFSUtil.getNamenodeServiceAddr(conf,null,"ns1-nn1"));
assertNull(DFSUtil.getNamenodeNameServiceId(conf));
assertNull(DFSUtil.getSecondaryNameServiceId(conf));
Collection<URI> uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(2,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://ns2")));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test conversion of LocatedBlock to BlockLocation: two blocks, one of
 * which is flagged corrupt, plus the empty-LocatedBlocks edge case.
 */
@Test public void testLocatedBlocks2Locations(){
DatanodeInfo d=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds=new DatanodeInfo[1];
ds[0]=d;
ExtendedBlock b1=new ExtendedBlock("bpid",1,1,1);
LocatedBlock l1=new LocatedBlock(b1,ds,0,false);
// The second block is marked corrupt (last constructor argument).
ExtendedBlock b2=new ExtendedBlock("bpid",2,1,1);
LocatedBlock l2=new LocatedBlock(b2,ds,0,true);
List<LocatedBlock> ls=Arrays.asList(l1,l2);
LocatedBlocks lbs=new LocatedBlocks(10,false,ls,l2,true,null);
BlockLocation[] bs=DFSUtil.locatedBlocks2Locations(lbs);
// assertEquals reports the actual value on failure, unlike assertTrue.
assertEquals("expected 2 blocks",2,bs.length);
int corruptCount=0;
for ( BlockLocation b : bs) {
if (b.isCorrupt()) {
corruptCount++;
}
}
assertEquals("expected 1 corrupt file",1,corruptCount);
// An empty LocatedBlocks must convert to an empty array, not null.
bs=DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0,bs.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
 * nameservice-id resolution for a federated (non-HA) configuration.
 */
@Test public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMESERVICES,"nn1,nn2");
final String NN1_ADDRESS="localhost:9000";
final String NN2_ADDRESS="localhost:9001";
final String NN3_ADDRESS="localhost:9002";
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn1"),NN1_ADDRESS);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn2"),NN2_ADDRESS);
// Nameservice -> (namenode id -> address). The type parameters were
// stripped in this file; restored here so the declarations compile.
Map<String, Map<String, InetSocketAddress>> nnMap=DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(2,nnMap.size());
// Non-HA nameservices have a single entry keyed by a null namenode id.
Map<String, InetSocketAddress> nn1Map=nnMap.get("nn1");
assertEquals(1,nn1Map.size());
InetSocketAddress addr=nn1Map.get(null);
assertEquals("localhost",addr.getHostName());
assertEquals(9000,addr.getPort());
Map<String, InetSocketAddress> nn2Map=nnMap.get("nn2");
assertEquals(1,nn2Map.size());
addr=nn2Map.get(null);
assertEquals("localhost",addr.getHostName());
assertEquals(9001,addr.getPort());
// Reverse lookup: address -> nameservice id (unknown address -> null).
checkNameServiceId(conf,NN1_ADDRESS,"nn1");
checkNameServiceId(conf,NN2_ADDRESS,"nn2");
checkNameServiceId(conf,NN3_ADDRESS,null);
assertFalse(HAUtil.isHAEnabled(conf,"nn1"));
assertFalse(HAUtil.isHAEnabled(conf,"nn2"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the client respects its keepalive timeout: a cached peer
 * older than the expiry must not be returned by the PeerCache.
 */
@Test(timeout=30000) public void testClientResponsesKeepAliveTimeout() throws Exception {
Configuration clientConf=new Configuration(conf);
final long CLIENT_EXPIRY_MS=10L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
// A distinct client context keeps this test's PeerCache isolated.
clientConf.set(DFS_CLIENT_CONTEXT,"testClientResponsesKeepAliveTimeout");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L);
assertEquals(0,peerCache.size());
assertXceiverCount(0);
// Reading the file caches exactly one peer.
DFSTestUtil.readFile(fs,TEST_FILE);
assertEquals(1,peerCache.size());
assertXceiverCount(1);
// Let the cached peer expire, then verify it is no longer served.
Thread.sleep(CLIENT_EXPIRY_MS + 1);
Peer peer=peerCache.get(dn.getDatanodeId(),false);
// assertNull is the idiomatic form of assertTrue(x == null).
assertNull(peer);
assertEquals(0,peerCache.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Test for the case where the client begins to read a long block, but doesn't
 * read bytes off the stream quickly. The datanode should time out sending the
 * chunks and the transceiver should die, even if it has a long keepalive.
 */
@Test(timeout=300000) public void testSlowReader() throws Exception {
final long CLIENT_EXPIRY_MS=600000L;
Configuration clientConf=new Configuration(conf);
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT,"testSlowReader");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
// Restart the DataNode with a short write timeout but a long keepalive,
// so only the send timeout can kill the transceiver.
DataNodeProperties props=cluster.stopDataNode(0);
props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,WRITE_TIMEOUT);
props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,120000);
assertTrue(cluster.restartDataNode(props,true));
dn=cluster.getDataNodes().get(0);
cluster.triggerHeartbeats();
DFSTestUtil.createFile(fs,TEST_FILE,1024 * 1024 * 8L,(short)1,0L);
FSDataInputStream stm=fs.open(TEST_FILE);
// Read a single byte and then stall, forcing the sender to time out.
stm.read();
assertXceiverCount(1);
// Wait for the server-side transceiver to die. Supplier is properly
// parameterized here (the raw type had its argument stripped).
GenericTestUtils.waitFor(new Supplier<Boolean>(){
@Override public Boolean get(){
return getXceiverCountWithoutServer() == 0;
}
}
,500,50000);
IOUtils.closeStream(stm);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a PacketHeader through both the stream- and buffer-based
 * deserialization paths, and checks the sanity bound on the header.
 */
@Test public void testPacketHeader() throws IOException {
PacketHeader original=new PacketHeader(4,1024,100,false,4096,false);
// Serialize to a byte array via the stream API.
ByteArrayOutputStream serialized=new ByteArrayOutputStream();
original.write(new DataOutputStream(serialized));
byte[] bytes=serialized.toByteArray();
// Deserialize through the DataInput path and compare.
PacketHeader fromStream=new PacketHeader();
fromStream.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
assertEquals(original,fromStream);
// Deserialize through the ByteBuffer path and compare.
PacketHeader fromBuffer=new PacketHeader();
fromBuffer.readFields(ByteBuffer.wrap(bytes));
assertEquals(original,fromBuffer);
// Boundary: 99 passes the sanity check, 100 does not.
assertTrue(original.sanityCheck(99));
assertFalse(original.sanityCheck(100));
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that creating and verifying successive files does not trigger
 * duplicate block scans, and that scan times survive a DataNode restart.
 */
@Test public void testDuplicateScans() throws Exception {
long startTime=Time.monotonicNow();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
FileSystem fs=null;
try {
fs=cluster.getFileSystem();
DataNode dataNode=cluster.getDataNodes().get(0);
int infoPort=dataNode.getInfoPort();
long scanTimeBefore=0, scanTimeAfter=0;
for (int i=1; i < 10; i++) {
Path fileName=new Path("/test" + i);
DFSTestUtil.createFile(fs,fileName,1024,(short)1,1000L);
waitForVerification(infoPort,fs,fileName,i,startTime,TIMEOUT);
if (i > 1) {
// The previous file's block must not have been scanned again:
// its latest scan time is unchanged by the new file's scan.
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (i - 1))));
// Message typo fixed ("shoud" -> "should").
assertFalse("scan time should not be 0",scanTimeAfter == 0);
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
scanTimeBefore=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + i)));
}
// A DataNode restart must not cause the last block to be re-scanned.
cluster.restartDataNode(0);
Thread.sleep(10000);
dataNode=cluster.getDataNodes().get(0);
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (9))));
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test if NameNode handles truncated blocks in block report
 */
@Test public void testTruncatedBlockReport() throws Exception {
final Configuration conf=new HdfsConfiguration();
final short REPLICATION_FACTOR=(short)2;
final Path fileName=new Path("/file1");
// Short report/replication/heartbeat intervals keep the test fast.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3L);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,3);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,3L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,false);
long startTime=Time.monotonicNow();
// Phase 1: create a replicated file and remember its first block.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
cluster.waitActive();
ExtendedBlock block;
try {
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,fileName,1,REPLICATION_FACTOR,0);
DFSTestUtil.waitReplication(fs,fileName,REPLICATION_FACTOR);
block=DFSTestUtil.getFirstBlock(fs,fileName);
}
finally {
cluster.shutdown();
}
// Phase 2: restart on the same storage and truncate the replica on node 0
// while the cluster is up.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).format(false).build();
cluster.waitActive();
try {
FileSystem fs=cluster.getFileSystem();
int infoPort=cluster.getDataNodes().get(0).getInfoPort();
assertTrue(waitForVerification(infoPort,fs,fileName,1,startTime,TIMEOUT) >= startTime);
// Shrink the replica by one byte so the next block report is truncated.
if (!changeReplicaLength(block,0,-1)) {
throw new IOException("failed to find or change length of replica on node 0 " + cluster.getDataNodes().get(0).getDisplayName());
}
}
finally {
cluster.shutdown();
}
// Phase 3: restart again with an extra DataNode; the NameNode must mark
// the truncated replica corrupt, re-replicate, and delete it.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).format(false).build();
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
cluster.waitClusterUp();
assertFalse("failed to leave safe mode",cluster.getNameNode().isInSafeMode());
try {
DFSTestUtil.waitReplication(cluster.getFileSystem(),fileName,REPLICATION_FACTOR);
waitForBlockDeleted(block,0,TIMEOUT);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
@Test public void testDatanodeBlockScanner() throws IOException, TimeoutException {
long startTime=Time.monotonicNow();
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testBlockVerification/file1");
Path file2=new Path("/tmp/testBlockVerification/file2");
DFSTestUtil.createFile(fs,file1,10,(short)1,0);
// Restart on the same storage so file1's block must be re-verified.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();
cluster.waitActive();
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
fs=cluster.getFileSystem();
DatanodeInfo dn=dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
// file1 must be verified by the block scanner after the restart.
assertTrue(waitForVerification(dn.getInfoPort(),fs,file1,1,startTime,TIMEOUT) >= startTime);
// Create and fully read file2, then wait for its verification too.
DFSTestUtil.createFile(fs,file2,10,(short)1,0);
IOUtils.copyBytes(fs.open(file2),new IOUtils.NullOutputStream(),conf,true);
assertTrue(waitForVerification(dn.getInfoPort(),fs,file2,2,startTime,TIMEOUT) >= startTime);
cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testBlockCorruptionPolicy() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
Random random=new Random();
FileSystem fs=null;
// Pick one of the three replicas at random to corrupt first.
int rand=random.nextInt(3);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testBlockVerification/file1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,0);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// One corrupt replica: after the node restarts and reports, the block
// must be re-replicated from the healthy copies, not marked corrupt.
assertTrue(MiniDFSCluster.corruptReplica(rand,block));
cluster.restartDataNode(rand);
DFSTestUtil.waitReplication(fs,file1,(short)2);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// All replicas corrupt: after the scanners run, the NameNode has no
// healthy source left and must report the block as corrupt.
assertTrue(MiniDFSCluster.corruptReplica(0,block));
assertTrue(MiniDFSCluster.corruptReplica(1,block));
assertTrue(MiniDFSCluster.corruptReplica(2,block));
for ( DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.runBlockScannerForBlock(dn,block);
}
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertTrue(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
cluster.shutdown();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test public void testDataDirectories() throws IOException {
File dataDir=new File(BASE_DIR,"data").getCanonicalFile();
Configuration conf=cluster.getConfiguration(0);
// Bogus "shv" URI scheme: DataNode creation must fail.
String dnDir=makeURI("shv",null,fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir);
DataNode dn=null;
try {
dn=DataNode.createDataNode(new String[]{},conf);
fail();
}
catch ( Exception e) {
// Expected: the invalid scheme must be rejected. The failure itself
// is asserted via the assertNull below.
}
finally {
if (dn != null) {
dn.shutdown();
}
}
assertNull("Data-node startup should have failed.",dn);
// Three accepted forms: plain file URI, file URI with an authority,
// and a bare absolute path.
String dnDir1=fileAsURI(dataDir).toString() + "1";
String dnDir2=makeURI("file","localhost",fileAsURI(dataDir).getPath() + "2");
String dnDir3=dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir1 + "," + dnDir2+ ","+ dnDir3);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertTrue("Data-node should startup.",cluster.isDataNodeUp());
}
finally {
if (cluster != null) {
cluster.shutdownDataNodes();
}
}
}
BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Ensure the datanode manager does not do host lookup after registration,
 * especially for node reports.
 * @throws Exception
 */
@Test public void testDNSLookups() throws Exception {
// MonitorDNS is a SecurityManager that counts host resolutions.
MonitorDNS sm=new MonitorDNS();
System.setSecurityManager(sm);
MiniDFSCluster cluster=null;
try {
HdfsConfiguration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
cluster.waitActive();
// Registration of the 8 DataNodes performs some lookups; the count
// must not grow after this point.
int initialLookups=sm.lookups;
assertTrue("dns security manager is active",initialLookups != 0);
DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
// Neither refreshNodes nor any report type may trigger new lookups.
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.ALL);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.DEAD);
assertEquals(initialLookups,sm.lookups);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
// Always restore the default SecurityManager for other tests.
System.setSecurityManager(null);
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using a "registration name" in a host include file.
 * Registration names are DataNode names specified in the configuration by
 * dfs.datanode.hostname. The DataNode will send this name to the NameNode
 * as part of its registration. Registration names are helpful when you
 * want to override the normal first result of DNS resolution on the
 * NameNode. For example, a given datanode IP may map to two hostnames,
 * and you may want to choose which hostname is used internally in the
 * cluster.
 * It is not recommended to use a registration name which is not also a
 * valid DNS hostname for the DataNode. See HDFS-5237 for background.
 */
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
Configuration hdfsConf=new Configuration(conf);
final String registrationName="127.0.0.100";
final String nonExistentDn="127.0.0.10";
hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,registrationName);
cluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build();
cluster.waitActive();
// Step 1: include only an address that matches no DataNode; the running
// node must eventually be reported dead.
ArrayList nodes=new ArrayList();
nodes.add(nonExistentDn);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
DFSClient client=getDfsClient(cluster.getNameNode(0),hdfsConf);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.DEAD);
if (info.length == 1) {
break;
}
LOG.info("Waiting for datanode to be marked dead");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
// Step 2: include the node by its registration name instead; after a
// restart it must register, be live, and report that hostname.
int dnPort=cluster.getDataNodes().get(0).getXferPort();
nodes=new ArrayList();
nodes.add(registrationName + ":" + dnPort);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
cluster.restartDataNode(0);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.LIVE);
if (info.length == 1) {
Assert.assertFalse(info[0].isDecommissioned());
Assert.assertFalse(info[0].isDecommissionInProgress());
assertEquals(registrationName,info[0].getHostName());
break;
}
LOG.info("Waiting for datanode to come back");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests restart of namenode while datanode hosts are added to exclude file
 */
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithNamenodeRestart");
  int numNamenodes=1;
  int numDatanodes=1;
  int replicas=1;
  startCluster(numNamenodes,numDatanodes,conf);
  Path file1=new Path("testDecommission.dat");
  FileSystem fileSys=cluster.getFileSystem();
  writeFile(fileSys,file1,replicas);
  DFSClient client=getDfsClient(cluster.getNameNode(),conf);
  DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
  DatanodeID excludedDatanodeID=info[0];
  String excludedDatanodeName=info[0].getXferAddr();
  // Exclude the only live DataNode, then add a second one so the block can
  // be replicated off the decommissioning node.
  writeConfigFile(excludeFile,new ArrayList(Arrays.asList(excludedDatanodeName)));
  cluster.startDataNodes(conf,1,true,null,null,null,null);
  numDatanodes+=1;
  assertEquals("Number of datanodes should be 2 ",2,cluster.getDataNodes().size());
  // Restart the NameNode: it must re-read the exclude file on startup and
  // drive the excluded node through decommissioning.
  cluster.restartNameNode();
  DatanodeInfo datanodeInfo=NameNodeAdapter.getDatanode(cluster.getNamesystem(),excludedDatanodeID);
  waitNodeState(datanodeInfo,AdminStates.DECOMMISSIONED);
  assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
  // Poll for the block to be fully replicated away from the excluded node.
  // Track success explicitly: asserting on the loop counter alone fails
  // spuriously when the check succeeds on the final attempt (off-by-one).
  boolean replicated=false;
  int tries=0;
  while (tries++ < 20) {
    try {
      Thread.sleep(1000);
      if (checkFile(fileSys,file1,replicas,datanodeInfo.getXferAddr(),numDatanodes) == null) {
        replicated=true;
        break;
      }
    }
    catch ( InterruptedException ie) {
      // best-effort retry loop; keep polling until the attempt budget runs out
    }
  }
  assertTrue("Checked if block was replicated after decommission, tried " + tries + " times.",replicated);
  cleanupFile(fileSys,file1);
  cluster.shutdown();
  // Restart the cluster once more to verify it comes up cleanly after the
  // decommission, then shut down.
  startCluster(numNamenodes,numDatanodes,conf);
  cluster.shutdown();
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that deprecated configuration keys are transparently forwarded
 * to their replacement keys by {@link Configuration}'s deprecation map.
 */
@Test public void testDeprecatedKeys() throws Exception {
  Configuration conf=new HdfsConfiguration();
  // Deprecated "topology.script.file.name" must surface via the new key.
  conf.set("topology.script.file.name","xyz");
  assertEquals("xyz",conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY));
  // Deprecated "dfs.replication.interval" must override the default (3)
  // when read through the new key.
  conf.setInt("dfs.replication.interval",1);
  assertEquals(1,conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,3));
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies MD5-of-MD5-of-CRC32 file checksums are consistent across the
// hdfs:// and webhdfs:// schemes: error paths (missing file, directory),
// equal checksums for files with identical content, the fixed checksum of
// an empty file, and that permission errors propagate through webhdfs.
@Test public void testFileChecksum() throws Exception {
// Log the seed so a failing run can be reproduced deterministically.
final long seed=RAN.nextLong();
System.out.println("seed=" + seed);
RAN.setSeed(seed);
final Configuration conf=getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs=cluster.getFileSystem();
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final UserGroupInformation current=UserGroupInformation.getCurrentUser();
// A distinct non-superuser so the permission check at the end is effective.
final UserGroupInformation ugi=UserGroupInformation.createUserForTesting(current.getShortUserName() + "x",new String[]{"user"});
// Checksum of a non-existent file must fail with FileNotFoundException.
try {
hdfs.getFileChecksum(new Path("/test/TestNonExistingFile"));
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue("Not throwing the intended exception message",e.getMessage().contains("File does not exist: /test/TestNonExistingFile"));
}
// Checksum of a directory must also fail with FileNotFoundException.
try {
Path path=new Path("/test/TestExistingDir/");
hdfs.mkdirs(path);
hdfs.getFileChecksum(path);
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue("Not throwing the intended exception message",e.getMessage().contains("Path is not a file: /test/TestExistingDir"));
}
// Build a webhdfs FileSystem running as the test user.
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return new Path(webhdfsuri).getFileSystem(conf);
}
}
);
final Path dir=new Path("/filechecksum");
final int block_size=1024;
final int buffer_size=conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,512);
// Repeat with growing file sizes (n blocks plus a random partial block).
for (int n=0; n < 5; n++) {
final byte[] data=new byte[RAN.nextInt(block_size / 2 - 1) + n * block_size + 1];
RAN.nextBytes(data);
System.out.println("data.length=" + data.length);
// Write "foo" over hdfs, then read its checksum through all three routes:
// hdfs, webhdfs with a relative path, and webhdfs with a qualified URI.
final Path foo=new Path(dir,"foo" + n);
{
final FSDataOutputStream out=hdfs.create(foo,false,buffer_size,(short)2,block_size);
out.write(data);
out.close();
}
final FileChecksum hdfsfoocs=hdfs.getFileChecksum(foo);
System.out.println("hdfsfoocs=" + hdfsfoocs);
final FileChecksum webhdfsfoocs=webhdfs.getFileChecksum(foo);
System.out.println("webhdfsfoocs=" + webhdfsfoocs);
final Path webhdfsqualified=new Path(webhdfsuri + dir,"foo" + n);
final FileChecksum webhdfs_qfoocs=webhdfs.getFileChecksum(webhdfsqualified);
System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
final Path zeroByteFile=new Path(dir,"zeroByteFile" + n);
{
final FSDataOutputStream out=hdfs.create(zeroByteFile,false,buffer_size,(short)2,block_size);
out.close();
}
// An empty file has a well-known constant MD5-of-MD5-of-CRC32 checksum.
{
final FileChecksum zeroChecksum=hdfs.getFileChecksum(zeroByteFile);
assertEquals(zeroChecksum.toString(),"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
}
// "bar" has the same bytes as "foo", so every checksum route must agree
// on both equals() and hashCode().
final Path bar=new Path(dir,"bar" + n);
{
final FSDataOutputStream out=hdfs.create(bar,false,buffer_size,(short)2,block_size);
out.write(data);
out.close();
}
{
final FileChecksum barcs=hdfs.getFileChecksum(bar);
final int barhashcode=barcs.hashCode();
assertEquals(hdfsfoocs.hashCode(),barhashcode);
assertEquals(hdfsfoocs,barcs);
assertEquals(webhdfsfoocs.hashCode(),barhashcode);
assertEquals(webhdfsfoocs,barcs);
assertEquals(webhdfs_qfoocs.hashCode(),barhashcode);
assertEquals(webhdfs_qfoocs,barcs);
}
// Revoke all permissions on the directory: the non-superuser webhdfs
// checksum request must now fail; restore permissions for the next round.
hdfs.setPermission(dir,new FsPermission((short)0));
{
try {
webhdfs.getFileChecksum(webhdfsqualified);
fail();
}
catch ( IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception",ioe);
}
}
hdfs.setPermission(dir,new FsPermission((short)0777));
}
cluster.shutdown();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that a per-file ChecksumOpt passed to create() is honored:
// two files with identical content but different checksum types (CRC32C
// vs CRC32) must report different file checksums with the requested types.
@Test public void testCreateWithCustomChecksum() throws Exception {
Configuration conf=getTestConfiguration();
MiniDFSCluster cluster=null;
Path testBasePath=new Path("/test/csum");
Path path1=new Path(testBasePath,"file_wtih_crc1");
Path path2=new Path(testBasePath,"file_with_crc2");
// Same bytes-per-checksum (512) but different checksum algorithms.
ChecksumOpt opt1=new ChecksumOpt(DataChecksum.Type.CRC32C,512);
ChecksumOpt opt2=new ChecksumOpt(DataChecksum.Type.CRC32,512);
FsPermission perm=FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
EnumSet flags=EnumSet.of(CreateFlag.OVERWRITE,CreateFlag.CREATE);
short repl=1;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem dfs=cluster.getFileSystem();
dfs.mkdirs(testBasePath);
// Create both files with identical parameters except the ChecksumOpt.
FSDataOutputStream out1=dfs.create(path1,perm,flags,4096,repl,131072L,null,opt1);
FSDataOutputStream out2=dfs.create(path2,perm,flags,4096,repl,131072L,null,opt2);
for (int i=0; i < 1024; i++) {
out1.write(i);
out2.write(i);
}
out1.close();
out2.close();
MD5MD5CRC32FileChecksum sum1=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
MD5MD5CRC32FileChecksum sum2=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
// Same content, different checksum type => checksums must differ.
assertFalse(sum1.equals(sum2));
assertEquals(DataChecksum.Type.CRC32C,sum1.getCrcType());
assertEquals(DataChecksum.Type.CRC32,sum2.getCrcType());
}
finally {
if (cluster != null) {
cluster.getFileSystem().delete(testBasePath,true);
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests error paths for{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}:
 * datanode RPC timeouts (via a fault injector that sleeps past the client
 * timeout), a stopped datanode, and deleted-block replicas reported as
 * null VolumeIds.
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception {
final Configuration conf=getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
// Client-side timeout (1500ms) deliberately shorter than the injected
// 3000ms delay below, so metadata RPCs are guaranteed to time out.
conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path tmpFile1=new Path("/errorfile1.dat");
final Path tmpFile2=new Path("/errorfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until both single-block files are fully replicated (2 files * 2
// replicas = 4 reported hosts) before exercising the error paths.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// not replicated yet; keep polling
}
return false;
}
}
,500,30000);
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
List allLocs=Lists.newArrayList();
allLocs.addAll(Arrays.asList(blockLocs1));
allLocs.addAll(Arrays.asList(blockLocs2));
// Inject a 3s delay into getHdfsBlocksMetadata so every datanode RPC
// exceeds the 1500ms client timeout.
DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(3000);
return null;
}
}
).when(injector).getHdfsBlocksMetadata();
DataNodeFaultInjector.instance=injector;
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs);
// All RPCs timed out, so no cached host info should be returned.
for ( BlockStorageLocation loc : locs) {
assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length);
}
// Restore normal behavior, then stop one datanode: each block should now
// report exactly one valid and one null VolumeId.
DataNodeFaultInjector.instance=new DataNodeFaultInjector();
DataNodeProperties stoppedNode=cluster.stopDataNode(0);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getHosts().length);
assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
}
// Bring the node back, delete one file, and wait for the replica
// deletions to land on the datanodes; the deleted file's (stale)
// locations should then yield null VolumeIds while the surviving
// file's remain valid.
cluster.restartDataNode(stoppedNode,true);
cluster.waitActive();
fs.delete(tmpFile2,true);
HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
assertNotNull(locs[0].getVolumeIds()[0]);
assertNotNull(locs[0].getVolumeIds()[1]);
assertNull(locs[1].getVolumeIds()[0]);
assertNull(locs[1].getVolumeIds()[1]);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies {@code isFileClosed}: a file with an open output stream reports
 * open, and reports closed once the stream is closed.
 */
@Test(timeout=60000) public void testFileCloseStatus() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    // Obtain the filesystem inside the try block so that a failure here
    // still shuts the cluster down (previously getFileSystem() ran before
    // the try and could leak the cluster on failure).
    DistributedFileSystem fs=cluster.getFileSystem();
    Path file=new Path("/simpleFlush.dat");
    FSDataOutputStream output=fs.create(file);
    // Written and flushed but not closed: the file must still be open.
    output.writeBytes("Some test data");
    output.flush();
    assertFalse("File status should be open",fs.isFileClosed(file));
    output.close();
    assertTrue("File status should be closed",fs.isFileClosed(file));
  }
  finally {
    // cluster is always non-null here; unconditional shutdown.
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises DFSClient lease-renewer lifecycle and basic client I/O:
// the renewer runs only while files are open for write, keeps running
// through the grace period after close, eventually stops, never runs for
// read-only access, and open() of a missing file throws
// FileNotFoundException. Sleeps are tied to the 1s grace period below,
// so statement order and timing here are significant.
@Test public void testDFSClient() throws Exception {
Configuration conf=getTestConfiguration();
final long grace=1000L;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final String filepathstring="/test/LeaseChecker/foo";
final Path[] filepaths=new Path[4];
for (int i=0; i < filepaths.length; i++) {
filepaths[i]=new Path(filepathstring + i);
}
final long millis=Time.now();
{
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
// No files open yet: the renewer must not be running.
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
{
// Single writer: renewer starts on create, survives close for up to
// the grace period, then stops.
final FSDataOutputStream out=dfs.create(filepaths[0]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.close();
Thread.sleep(grace / 4 * 3);
// Still inside the grace period.
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
// Allow up to 3 extra half-grace sleeps for the renewer to wind down.
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Two concurrent writers: closing one stream keeps the renewer alive
// for the other; it also stays alive within the grace period after
// the second close.
final FSDataOutputStream out1=dfs.create(filepaths[1]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
final FSDataOutputStream out2=dfs.create(filepaths[2]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.close();
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.close();
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Writer opened during a previous grace period: renewer keeps running
// across the write, close, and the following grace window, then stops.
final FSDataOutputStream out3=dfs.create(filepaths[3]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.close();
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
dfs.close();
}
{
// Opening a non-existent path must throw FileNotFoundException; the
// finally-with-assertTrue(false) fires only when open() wrongly succeeds.
FileSystem fs=cluster.getFileSystem();
Path dir=new Path("/wrwelkj");
assertFalse("File should not exist for test.",fs.exists(dir));
try {
FSDataInputStream in=fs.open(dir);
try {
in.close();
fs.close();
}
finally {
assertTrue("Did not get a FileNotFoundException for non-existing" + " file.",false);
}
}
catch ( FileNotFoundException fnf) {
// expected path
}
}
{
// Read-only access must never start the lease renewer.
final DistributedFileSystem dfs=cluster.getFileSystem();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
FSDataInputStream in=dfs.open(filepaths[0]);
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
assertEquals(millis,in.readLong());
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
in.close();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
dfs.close();
}
{
// Round-trip write/read through an explicit hdfs://ip:port URI.
String uri="hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file";
Path path=new Path(uri);
FileSystem fs=FileSystem.get(path.toUri(),conf);
FSDataOutputStream out=fs.create(path);
byte[] buf=new byte[1024];
out.write(buf);
out.close();
FSDataInputStream in=fs.open(path);
in.readFully(buf);
in.close();
fs.close();
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}call
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsBatching() throws Exception {
final Configuration conf=getTestConfiguration();
// TRACE logging on the RPC/client classes to aid debugging of failures.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.TRACE);
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final DistributedFileSystem fs=cluster.getFileSystem();
final Path tmpFile1=new Path("/tmpfile1.dat");
final Path tmpFile2=new Path("/tmpfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until both single-block files are fully replicated (2 files * 2
// replicas = 4 reported hosts) before querying storage locations.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// not replicated yet; keep polling
}
return false;
}
}
,500,30000);
// Batch both files' locations into one getFileBlockStorageLocations call.
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
BlockLocation[] blockLocs=(BlockLocation[])ArrayUtils.addAll(blockLocs1,blockLocs2);
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
// Informational dump of which datanode/volume holds each block.
int counter=0;
for ( BlockStorageLocation l : locs) {
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block "+ counter+ " on volume id "+ id.toString());
}
}
counter++;
}
// One location per file, two replicas each, all with valid VolumeIds.
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getVolumeIds().length);
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,id != null);
}
}
}
finally {
cluster.shutdown();
}
}
BooleanVerifier
/**
 * Verifies that seek() on an HDFS input stream throws IOException both when
 * seeking past the end of the file and when seeking after the stream has
 * been closed.
 */
@Test public void testDFSSeekExceptions() throws IOException {
  Configuration conf=getTestConfiguration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fileSys=cluster.getFileSystem();
    String file="/test/fileclosethenseek/file-0";
    Path path=new Path(file);
    FSDataOutputStream output=fileSys.create(path);
    // 45 bytes of content, so seeking to 10 is valid but 100 is past EOF.
    output.writeBytes("Some test data to write longer than 10 bytes");
    output.close();
    FSDataInputStream input=fileSys.open(path);
    input.seek(10);
    assertSeekThrowsIOE(input,100,"Failed to throw IOE when seeking past end");
    input.close();
    // Any seek on a closed stream must also fail.
    assertSeekThrowsIOE(input,1,"Failed to throw IOE when seeking after close");
    fileSys.close();
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

/** Asserts that seeking {@code in} to {@code pos} throws an IOException. */
private static void assertSeekThrowsIOE(FSDataInputStream in, long pos, String message) {
  boolean threw=false;
  try {
    in.seek(pos);
  }
  catch ( IOException e) {
    threw=true;
  }
  assertTrue(message,threw);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Writes a file on an unencrypted cluster, then restarts the cluster with
// encryption enabled on the same storage and verifies a client can still
// read the data and that checksums match, including after NameNode and
// DataNode restarts.
@Test public void testLongLivedReadClientAfterRestart() throws IOException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
// Remember the checksum from the unencrypted cluster for later comparison.
FileChecksum checksum=fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
// Restart the same cluster storage (format=false) with encryption enabled.
setEncryptionConfigKeys(conf);
cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fs=getFileSystem(conf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
// The long-lived client must survive NN and DN restarts as well.
cluster.restartNameNode();
assertTrue(cluster.restartDataNode(0));
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
fs.close();
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a long-lived write client on an encrypted cluster keeps
 * working across NameNode and DataNode restarts: data written before the
 * restart remains readable, and an append after the restart succeeds.
 */
@Test public void testLongLivedWriteClientAfterRestart() throws IOException {
  MiniDFSCluster miniCluster=null;
  try {
    Configuration clusterConf=new Configuration();
    setEncryptionConfigKeys(clusterConf);
    miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
    FileSystem fileSystem=getFileSystem(clusterConf);
    // First write, then confirm the content round-trips.
    writeTestDataToFile(fileSystem);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
    // Bounce the NameNode and all DataNodes while the client stays open.
    miniCluster.restartNameNode();
    assertTrue(miniCluster.restartDataNodes());
    miniCluster.waitActive();
    // Second write through the same client; the file now holds both payloads.
    writeTestDataToFile(fileSystem);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
    fileSystem.close();
  }
  finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies basic encryption-zone read/write: a file in a zone matches an
// identical unencrypted file; rolling the zone key keeps old files readable
// while new files get fresh EDEKs and a new key version.
@Test(timeout=120000) public void testReadWrite() throws Exception {
final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
// Reference file outside any encryption zone, same length and seed.
final Path baseFile=new Path("/base");
final int len=8192;
DFSTestUtil.createFile(fs,baseFile,len,(short)1,0xFEED);
final Path zone=new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone,TEST_KEY);
final Path encFile1=new Path(zone,"myfile");
DFSTestUtil.createFile(fs,encFile1,len,(short)1,0xFEED);
// Encrypted file must decrypt to the same bytes as the plain reference.
verifyFilesEqual(fs,baseFile,encFile1,len);
assertNumZones(1);
// Roll the zone key to a new version.
String keyName=dfsAdmin.listEncryptionZones().next().getKeyName();
cluster.getNamesystem().getProvider().rollNewVersion(keyName);
// The pre-roll file must still be readable with its original key version.
verifyFilesEqual(fs,baseFile,encFile1,len);
final Path encFile2=new Path(zone,"myfile2");
DFSTestUtil.createFile(fs,encFile2,len,(short)1,0xFEED);
FileEncryptionInfo feInfo1=getFileEncryptionInfo(encFile1);
FileEncryptionInfo feInfo2=getFileEncryptionInfo(encFile2);
// New file must get a distinct EDEK and the rolled key version.
assertFalse("EDEKs should be different",Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),feInfo2.getEncryptedDataEncryptionKey()));
assertNotEquals("Key was rolled, versions should be different",feInfo1.getEzKeyVersionName(),feInfo2.getEzKeyVersionName());
verifyFilesEqual(fs,encFile1,encFile2,len);
}
InternalCallVerifier BooleanVerifier
/**
 * This quite tricky test prevents acknowledgement packets from a datanode
 * This should block any write attempts after ackQueue is full.
 * Test is blocking, so the MiniDFSCluster has to be killed harshly.
 * @throws IOException in case of an error
 */
@Test public void pipeline_06() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  final int MAX_PACKETS=80;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  final PipelinesTestUtil.PipelinesTest pipst=(PipelinesTestUtil.PipelinesTest)PipelinesTestUtil.initTest();
  // Suspend acknowledgements so the ackQueue fills up.
  pipst.setSuspend(true);
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  FSDataOutputStream fsOut=fs.create(filePath);
  int cnt=0;
  try {
    QueueChecker cq=new QueueChecker(pipst,MAX_PACKETS);
    cq.start();
    int bytesToSend=700;
    // Keep sending until the checker lifts the suspension or the attempt
    // budget (100) is exhausted.
    while (cnt < 100 && pipst.getSuspend()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("_06(): " + cnt + " sending another "+ bytesToSend+ " bytes");
      }
      // BUG FIX: the increment used to live inside the isDebugEnabled()
      // guard, so with debug logging disabled cnt never advanced and the
      // loop was unbounded by the counter. Increment unconditionally.
      cnt++;
      TestPipelines.writeData(fsOut,bytesToSend);
    }
  }
  catch ( Exception e) {
    LOG.warn("Getting unexpected exception: ",e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Last queued packet number " + pipst.getLastQueued());
  }
  assertTrue("Shouldn't be able to send more than 81 packet",pipst.getLastQueued() <= 81);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that copy on write for blocks works correctly
 * @throws IOException an exception might be thrown
 */
@Test public void testCopyOnWrite() throws IOException {
Configuration conf=new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
try {
Path file1=new Path("/filestatus.dat");
FSDataOutputStream stm=AppendTestUtil.createFile(fs,file1,1);
writeFile(stm);
stm.close();
DataNode[] dn=cluster.listDataNodes();
assertTrue("There should be only one datanode but found " + dn.length,dn.length == 1);
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
List blocks=locations.getLocatedBlocks();
// Create extra hard links to some of the block files (every other block,
// i += 2 — presumably deliberate so only a subset is multiply-linked;
// NOTE(review): confirm against upstream intent).
for (int i=0; i < blocks.size(); i=i + 2) {
ExtendedBlock b=blocks.get(i).getBlock();
final File f=DataNodeTestUtils.getFile(dn[0],b.getBlockPoolId(),b.getLocalBlock().getBlockId());
File link=new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f + " to "+ link);
HardLink.createHardLink(f,link);
}
// First unlink pass: every block must report that a detach happened.
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned true",DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
// Second unlink pass: blocks are already detached, so unlinkBlock must
// now be a no-op returning false.
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned false",!DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
}
finally {
client.close();
fs.close();
cluster.shutdown();
}
}
BooleanVerifier
/**
 * Test that appends to files at random offsets.
 * @throws IOException an exception might be thrown
 */
@Test public void testComplexAppend() throws IOException {
fileContents=AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
Configuration conf=new HdfsConfiguration();
// Aggressive heartbeat/replication timing so node churn during the
// workload is detected quickly.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,2000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,2);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,30000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,30000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
try {
// Pre-create the target files, each with a random replication factor.
for (int i=0; i < numberOfFiles; i++) {
final int replication=AppendTestUtil.nextInt(numDatanodes - 2) + 1;
Path testFile=new Path("/" + i + ".dat");
FSDataOutputStream stm=AppendTestUtil.createFile(fs,testFile,replication);
stm.close();
testFiles.add(testFile);
}
// Launch the concurrent append workload threads.
workload=new Workload[numThreads];
for (int i=0; i < numThreads; i++) {
workload[i]=new Workload(cluster,i);
workload[i].start();
}
// Join every worker; on interruption, retry the same index (i-- undoes
// the upcoming i++ so the join is attempted again).
for (int i=0; i < numThreads; i++) {
try {
System.out.println("Waiting for thread " + i + " to complete...");
workload[i].join();
System.out.println("Waiting for thread " + i + " complete.");
}
catch ( InterruptedException e) {
i--;
}
}
}
finally {
fs.close();
cluster.shutdown();
}
// Workers record failures in the shared globalStatus flag.
assertTrue("testComplexAppend Worker encountered exceptions.",globalStatus);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Creates one file, writes a few bytes to it and then closed it.
 * Reopens the same file for appending, write all blocks and then close.
 * Verify that all data exists in file.
 * Also covers: append to a non-existent file (FileNotFoundException) and
 * append permission enforcement (AccessControlException when the file is
 * not writable by the appending user).
 * @throws IOException an exception might be thrown
 */
@Test public void testSimpleAppend() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
  fileContents=AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=cluster.getFileSystem();
  try {
    {
      // Part 1: write the file in three segments (create + two appends)
      // and verify the reassembled content.
      Path file1=new Path("/simpleAppend.dat");
      FSDataOutputStream stm=AppendTestUtil.createFile(fs,file1,1);
      System.out.println("Created file simpleAppend.dat");
      int mid=186;
      System.out.println("Writing " + mid + " bytes to file "+ file1);
      stm.write(fileContents,0,mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      int mid2=607;
      // BUG FIX: this message previously reported "mid" bytes although the
      // append below writes mid2 - mid bytes.
      System.out.println("Writing " + (mid2 - mid) + " bytes to file "+ file1);
      stm=fs.append(file1);
      stm.write(fileContents,mid,mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      stm=fs.append(file1);
      // Append must resume at the current file length, not at zero.
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file "+ file1);
      stm.write(fileContents,mid2,AppendTestUtil.FILE_SIZE - mid2);
      System.out.println("Written second part of file");
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      AppendTestUtil.checkFullFile(fs,file1,AppendTestUtil.FILE_SIZE,fileContents,"Read 2");
    }
    {
      // Part 2: appending to a non-existent file must fail.
      FSDataOutputStream out=null;
      try {
        out=fs.append(new Path("/non-existing.dat"));
        fail("Expected to have FileNotFoundException");
      }
      catch ( java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      }
      finally {
        IOUtils.closeStream(out);
      }
    }
    {
      // Part 3: permission checks for append as a non-superuser.
      Path root=new Path("/");
      fs.setPermission(root,new FsPermission((short)0777));
      fs.close();
      final UserGroupInformation superuser=UserGroupInformation.getCurrentUser();
      String username="testappenduser";
      String group="testappendgroup";
      // The test user must genuinely differ from the superuser for the
      // permission checks below to be meaningful.
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser=UserGroupInformation.createUserForTesting(username,new String[]{group});
      fs=DFSTestUtil.getFileSystemAs(appenduser,conf);
      Path dir=new Path(root,getClass().getSimpleName());
      Path foo=new Path(dir,"foo.dat");
      FSDataOutputStream out=null;
      int offset=0;
      // Create the file as the test user.
      try {
        out=fs.create(foo);
        int len=10 + AppendTestUtil.nextInt(100);
        out.write(fileContents,offset,len);
        offset+=len;
      }
      finally {
        IOUtils.closeStream(out);
      }
      // dir: execute-only for owner; foo: owner-writable -> append allowed.
      fs.setPermission(dir,new FsPermission((short)0100));
      fs.setPermission(foo,new FsPermission((short)0200));
      out=null;
      try {
        out=fs.append(foo);
        int len=10 + AppendTestUtil.nextInt(100);
        out.write(fileContents,offset,len);
        offset+=len;
      }
      finally {
        IOUtils.closeStream(out);
      }
      // foo: no owner write bit (0577) -> append must be denied.
      fs.setPermission(foo,new FsPermission((short)0577));
      fs.setPermission(dir,new FsPermission((short)0777));
      out=null;
      try {
        out=fs.append(foo);
        fail("Expected to have AccessControlException");
      }
      catch ( AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      }
      finally {
        IOUtils.closeStream(out);
      }
    }
  }
  catch ( IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  }
  catch ( Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * TC11: Racing rename
 * @throws IOException an exception might be thrown
 */
@Test public void testTC11() throws Exception {
final Path p=new Path("/TC11/foo");
System.out.println("p=" + p);
// Write exactly one full block, then close.
final int len1=(int)BLOCK_SIZE;
{
FSDataOutputStream out=fs.create(p,false,buffersize,REPLICATION,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
// Reopen for append, hflush half a block, then rename the file while the
// appending stream is still open — the "racing rename".
FSDataOutputStream out=fs.append(p);
final int len2=(int)BLOCK_SIZE / 2;
AppendTestUtil.write(out,len1,len2);
out.hflush();
final Path pnew=new Path(p + ".new");
assertTrue(fs.rename(p,pnew));
// Closing the stream after the rename must still complete the file.
out.close();
// Verify every replica's on-disk block length matches the NameNode's view;
// all blocks but the last must be exactly one full block.
final long len=fs.getFileStatus(pnew).getLen();
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(pnew.toString(),0L,len);
final int numblock=locatedblocks.locatedBlockCount();
for (int i=0; i < numblock; i++) {
final LocatedBlock lb=locatedblocks.get(i);
final ExtendedBlock blk=lb.getBlock();
final long size=lb.getBlockSize();
if (i < numblock - 1) {
assertEquals(BLOCK_SIZE,size);
}
for ( DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn=cluster.getDataNode(datanodeinfo.getIpcPort());
final Block metainfo=DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
assertEquals(size,metainfo.getNumBytes());
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout=60000) public void testAppendInsufficientLocations() throws Exception {
  Configuration conf=new Configuration();
  // Fast heartbeat/recheck so dead datanodes are noticed quickly.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,3000);
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem=null;
  try {
    fileSystem=cluster.getFileSystem();
    Path f=new Path("/testAppend");
    FSDataOutputStream create=fileSystem.create(f,(short)2);
    create.write("/testAppend".getBytes());
    create.close();
    DFSTestUtil.waitReplication(fileSystem,f,(short)2);
    LocatedBlocks lbs=fileSystem.dfs.getNamenode().getBlockLocations("/testAppend",0,Long.MAX_VALUE);
    // Shut down every datanode that holds a replica of the last block,
    // leaving the file with zero live locations.
    List<DataNode> dnsOfCluster=cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations=lbs.getLastLocatedBlock().getLocations();
    for ( DataNode dn : dnsOfCluster) {
      for ( DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }
    DFSTestUtil.waitReplication(fileSystem,f,(short)0);
    // With no replicas available, append must fail.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    }
    catch ( IOException e) {
      LOG.info("Expected exception: ",e);
    }
    // The failed append must not leave the file in under-construction state.
    FSDirectory dir=cluster.getNamesystem().getFSDirectory();
    final INodeFile inode=INodeFile.valueOf(dir.getINode("/testAppend"),"/testAppend");
    assertTrue("File should remain closed",!inode.isUnderConstruction());
  }
  finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, and then tries to recover
 * the lease from another thread.
 */
@Test(timeout=60000) public void testRecoverFinalizedBlock() throws Throwable {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  try {
    cluster.waitActive();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    // Delay completeFile() so the close() below blocks until we let it proceed.
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    file1 = new Path("/testRecoverFinalized");
    final OutputStream stm = client.create("/testRecoverFinalized", true);
    AppendTestUtil.write(stm, 0, 4096);
    // Parameterized AtomicReference: the raw form would not even compile at
    // the err.get() assignment below (get() would return Object).
    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    Thread t = new Thread() {
      @Override public void run() {
        try {
          stm.close();
        } catch (Throwable t) {
          err.set(t);
        }
      }
    };
    t.start();
    LOG.info("Waiting for close to get to latch...");
    delayer.waitForCall();
    // Stop lease renewal so the lease can be recovered by another user.
    LOG.info("Killing lease checker");
    client.getLeaseRenewer().interruptAndJoin();
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    LOG.info("Recovering file");
    recoverFile(fs2);
    LOG.info("Telling close to proceed.");
    delayer.proceed();
    LOG.info("Waiting for close to finish.");
    t.join();
    LOG.info("Close finished.");
    // The delayed close must fail: the lease was recovered by fs2's user.
    Throwable thrownByClose = err.get();
    assertNotNull(thrownByClose);
    assertTrue(thrownByClose instanceof IOException);
    if (!thrownByClose.getMessage().contains("No lease on /testRecoverFinalized")) throw thrownByClose;
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, recovers a file from another writer,
 * starts writing from that writer, and then has the old lease holder
 * call completeFile
 */
@Test(timeout=60000) public void testCompleteOtherLeaseHoldersFile() throws Throwable {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  try {
    cluster.waitActive();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    // Delay completeFile() so the close() below blocks until told to proceed.
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    file1 = new Path("/testCompleteOtherLease");
    final OutputStream stm = client.create("/testCompleteOtherLease", true);
    AppendTestUtil.write(stm, 0, 4096);
    // Parameterized AtomicReference: the raw form would not compile at the
    // err.get() assignment below (get() would return Object).
    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    Thread t = new Thread() {
      @Override public void run() {
        try {
          stm.close();
        } catch (Throwable t) {
          err.set(t);
        }
      }
    };
    t.start();
    LOG.info("Waiting for close to get to latch...");
    delayer.waitForCall();
    // Stop lease renewal so the lease can be recovered by another user.
    LOG.info("Killing lease checker");
    client.getLeaseRenewer().interruptAndJoin();
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    LOG.info("Recovering file");
    recoverFile(fs2);
    // The new lease holder appends some data before the old close proceeds.
    LOG.info("Opening file for append from new fs");
    FSDataOutputStream appenderStream = fs2.append(file1);
    LOG.info("Writing some data from new appender");
    AppendTestUtil.write(appenderStream, 0, 4096);
    LOG.info("Telling old close to proceed.");
    delayer.proceed();
    LOG.info("Waiting for close to finish.");
    t.join();
    LOG.info("Close finished.");
    // The original writer's completeFile must fail with a lease mismatch.
    Throwable thrownByClose = err.get();
    assertNotNull(thrownByClose);
    assertTrue(thrownByClose instanceof IOException);
    if (!thrownByClose.getMessage().contains("Lease mismatch")) throw thrownByClose;
    appenderStream.close();
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds does not thrown.
 * See Hadoop-4351.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);
    // Locate the block file on the first datanode, checking both storage dirs.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("Data directory does not exist", dataDir.exists());
    ExtendedBlock blk = getBlock(bpid, dataDir);
    if (blk == null) {
      storageDir = cluster.getInstanceStorageDir(0, 1);
      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      blk = getBlock(bpid, dataDir);
    }
    // assertNotNull is the direct form of the old assertFalse(blk == null).
    assertNotNull("Data directory does not contain any blocks or there was an " + "IO error", blk);
    // Start a third datanode that does NOT hold a replica of the block.
    cluster.startDataNodes(conf, 1, true, null, null);
    // Parameterized list; the raw ArrayList would not compile at get(2) below.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    // JUnit convention: expected value first (arguments were reversed).
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      // Report the block corrupt from a node that never stored it; this must
      // not raise ArrayIndexOutOfBoundsException.
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }
    // The file must still be readable and deletable afterwards.
    fs.open(FILE_PATH);
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * check if DFS can handle corrupted blocks properly
 */
@Test public void testFileCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    // Find the finalized-block directory on the third datanode.
    File storageDir = cluster.getInstanceStorageDir(2, 0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("data directory does not exist", data_dir.exists());
    File[] blocks = data_dir.listFiles();
    assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
    // Delete every block file on this datanode, leaving only the replicas
    // stored on the other two datanodes.
    for (File blockFile : blocks) {
      if (blockFile.getName().startsWith("blk_")) {
        System.out.println("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
      }
    }
    // DFS must still serve intact file contents from the surviving replicas.
    assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
    util.cleanup(fs, "/srcdat");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
* Test that file data does not become corrupted even in the face of errors.
*/
@Test public void testFileCreationError1() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
try {
Path file1=new Path("/filestatus.dat");
FSDataOutputStream stm=createFile(fs,file1,1);
assertTrue(file1 + " should be a file",fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
cluster.shutdownDataNodes();
while (true) {
DatanodeInfo[] info=client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
if (info.length == 0) {
break;
}
System.out.println("testFileCreationError1: waiting for datanode " + " to die.");
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
try {
stm.write(buffer);
stm.close();
}
catch ( Exception e) {
System.out.println("Encountered expected exception");
}
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up",locations.locatedBlockCount() == 0);
}
finally {
cluster.shutdown();
client.close();
}
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
* Create a file, write something, hflush but not close.
* Then change lease period and wait for lease recovery.
* Finally, read the block directly from each Datanode and verify the content.
*/
@Test public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod=1000;
final int DATANODE_NUM=3;
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
final String f=DIR + "foo";
final Path fpath=new Path(f);
HdfsDataOutputStream out=create(dfs,fpath,DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl=out.getCurrentBlockReplication();
assertTrue(f + " should be replicated to " + DATANODE_NUM+ " datanodes.",actualRepl == DATANODE_NUM);
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
}
LocatedBlocks locations=dfs.dfs.getNamenode().getBlockLocations(f,0,Long.MAX_VALUE);
assertEquals(1,locations.locatedBlockCount());
LocatedBlock locatedblock=locations.getLocatedBlocks().get(0);
int successcount=0;
for ( DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
DataNode datanode=cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk=locatedblock.getBlock();
Block b=DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
final File blockfile=DataNodeTestUtils.getFile(datanode,blk.getBlockPoolId(),b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
BufferedReader in=new BufferedReader(new FileReader(blockfile));
assertEquals("something",in.readLine());
in.close();
successcount++;
}
}
System.out.println("successcount=" + successcount);
assertTrue(successcount > 0);
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
BooleanVerifier
/**
* Test that all open files are closed when client dies abnormally.
*/
@Test public void testDFSClientDeath() throws IOException, InterruptedException {
Configuration conf=new HdfsConfiguration();
System.out.println("Testing adbornal client death.");
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
DistributedFileSystem dfs=(DistributedFileSystem)fs;
DFSClient dfsclient=dfs.dfs;
try {
Path file1=new Path("/clienttest.dat");
FSDataOutputStream stm=createFile(fs,file1,1);
System.out.println("Created file clienttest.dat");
writeFile(stm);
dfsclient.close();
assertTrue(file1 + " does not exist.",AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that file leases are persisted across namenode restarts.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // File 1: written and hflushed, then left open across the restarts.
    Path file1 = new Path("/filestatus.dat");
    HdfsDataOutputStream stm = create(fs, file1, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
    assertEquals(file1 + " should be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
    writeFile(stm, numBlocks * blockSize);
    stm.hflush();
    assertEquals(file1 + " should still be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
    // Rename file1 while it is still open for write.
    Path fileRenamed = new Path("/filestatusRenamed.dat");
    fs.rename(file1, fileRenamed);
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to " + fileRenamed);
    file1 = fileRenamed;
    // Files 2-4: created and left open, never written to.
    Path file2 = new Path("/filestatus2.dat");
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
    Path file3 = new Path("/user/home/fullpath.dat");
    FSDataOutputStream stm3 = createFile(fs, file3, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
    Path file4 = new Path("/user/home/fullpath4.dat");
    FSDataOutputStream stm4 = createFile(fs, file4, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
    // Move the parent directory of files 3 and 4 while they are open.
    fs.mkdirs(new Path("/bin"));
    fs.rename(new Path("/user/home"), new Path("/bin"));
    Path file3new = new Path("/bin/home/fullpath.dat");
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to " + file3new);
    Path file4new = new Path("/bin/home/fullpath4.dat");
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to " + file4new);
    // Restart the namenode twice (no reformat); leases must survive both.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // Point the open streams at the post-rename paths so the lease paths
    // match what the restarted namenode recovered.
    DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
    dfstream.setTestFilename(file1.toString());
    dfstream = (DFSOutputStream) (stm3.getWrappedStream());
    dfstream.setTestFilename(file3new.toString());
    dfstream = (DFSOutputStream) (stm4.getWrappedStream());
    dfstream.setTestFilename(file4new.toString());
    // Writes and closes through the pre-restart leases must still succeed.
    byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
    stm.write(buffer);
    stm.close();
    stm2.write(buffer);
    stm2.close();
    stm3.close();
    stm4.close();
    // Verify the block counts recorded by the restarted namenode.
    DFSClient client = fs.dfs;
    LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    // assertEquals reports expected/actual on failure (was assertTrue(==)).
    assertEquals("Error blocks were not cleaned up for file " + file1, 3, locations.locatedBlockCount());
    locations = client.getNamenode().getBlockLocations(file2.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertEquals("Error blocks were not cleaned up for file " + file2, 1, locations.locatedBlockCount());
  } finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
* Test deleteOnExit
*/
@Test public void testDeleteOnExit() throws IOException {
Configuration conf=new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
FileSystem localfs=FileSystem.getLocal(conf);
try {
Path file1=new Path("filestatus.dat");
Path file2=new Path("filestatus2.dat");
Path file3=new Path("filestatus3.dat");
FSDataOutputStream stm1=createFile(fs,file1,1);
FSDataOutputStream stm2=createFile(fs,file2,1);
FSDataOutputStream stm3=createFile(localfs,file3,1);
System.out.println("DeleteOnExit: Created files.");
writeFile(stm1);
writeFile(stm3);
stm1.close();
stm2.close();
stm3.close();
fs.deleteOnExit(file1);
fs.deleteOnExit(file2);
localfs.deleteOnExit(file3);
fs.close();
localfs.close();
fs=null;
localfs=null;
fs=cluster.getFileSystem();
localfs=FileSystem.getLocal(conf);
assertTrue(file1 + " still exists inspite of deletOnExit set.",!fs.exists(file1));
assertTrue(file2 + " still exists inspite of deletOnExit set.",!fs.exists(file2));
assertTrue(file3 + " still exists inspite of deletOnExit set.",!localfs.exists(file3));
System.out.println("DeleteOnExit successful.");
}
finally {
IOUtils.closeStream(fs);
IOUtils.closeStream(localfs);
cluster.shutdown();
}
}
BooleanVerifier
/**
* Test file creation using createNonRecursive().
*/
@Test public void testFileCreationNonRecursive() throws IOException {
Configuration conf=new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
final Path path=new Path("/" + Time.now() + "-testFileCreationNonRecursive");
FSDataOutputStream out=null;
try {
IOException expectedException=null;
final String nonExistDir="/non-exist-" + Time.now();
fs.delete(new Path(nonExistDir),true);
EnumSet createFlag=EnumSet.of(CreateFlag.CREATE);
out=createNonRecursive(fs,path,1,createFlag);
out.close();
try {
createNonRecursive(fs,new Path(path,"Create"),1,createFlag);
}
catch ( IOException e) {
expectedException=e;
}
assertTrue("Create a file when parent directory exists as a file" + " should throw ParentNotDirectoryException ",expectedException != null && expectedException instanceof ParentNotDirectoryException);
fs.delete(path,true);
final Path path2=new Path(nonExistDir + "/testCreateNonRecursive");
expectedException=null;
try {
createNonRecursive(fs,path2,1,createFlag);
}
catch ( IOException e) {
expectedException=e;
}
assertTrue("Create a file in a non-exist dir using" + " createNonRecursive() should throw FileNotFoundException ",expectedException != null && expectedException instanceof FileNotFoundException);
EnumSet overwriteFlag=EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE);
out=createNonRecursive(fs,path,1,overwriteFlag);
out.close();
expectedException=null;
try {
createNonRecursive(fs,new Path(path,"Overwrite"),1,overwriteFlag);
}
catch ( IOException e) {
expectedException=e;
}
assertTrue("Overwrite a file when parent directory exists as a file" + " should throw ParentNotDirectoryException ",expectedException != null && expectedException instanceof ParentNotDirectoryException);
fs.delete(path,true);
final Path path3=new Path(nonExistDir + "/testOverwriteNonRecursive");
expectedException=null;
try {
createNonRecursive(fs,path3,1,overwriteFlag);
}
catch ( IOException e) {
expectedException=e;
}
assertTrue("Overwrite a file in a non-exist dir using" + " createNonRecursive() should throw FileNotFoundException ",expectedException != null && expectedException instanceof FileNotFoundException);
}
finally {
fs.close();
cluster.shutdown();
}
}
BooleanVerifier
/**
 * Closing an output stream whose pipeline can no longer satisfy the
 * configured minimum replication (3) must fail with an IOException.
 */
@Test public void testFsCloseAfterClusterShutdown() throws IOException {
  System.out.println("test testFsCloseAfterClusterShutdown start");
  final int DATANODE_NUM = 3;
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
  // Disable client pings so the failed pipeline is detected via timeout.
  conf.setBoolean("ipc.client.ping", false);
  conf.setInt("ipc.ping.interval", 10000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    Path fpath = new Path(DIR + "testFsCloseAfterClusterShutdown");
    FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
    out.write("something_test".getBytes());
    out.hflush();
    // Kill one datanode so only two remain — below replication.min.
    cluster.stopDataNode(2);
    boolean closeFailed = false;
    try {
      out.close();
      System.out.println("testFsCloseAfterClusterShutdown: Error here");
    } catch (IOException e) {
      closeFailed = true;
    }
    assertTrue("Failed to close file after cluster shutdown", closeFailed);
  } finally {
    System.out.println("testFsCloseAfterClusterShutdown successful");
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that deleting the parent directory of an open file persists across
 * namenode restarts, while open files outside that directory survive.
 */
@Test public void testFileCreationDeleteParent() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // file1 lives under /foo, which is deleted while file1 is still open.
    Path dir = new Path("/foo");
    Path file1 = new Path(dir, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1, 1000);
    stm1.hflush();
    // file2 is outside the deleted directory and must survive.
    Path file2 = new Path("/file2");
    FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2, 1000);
    stm2.hflush();
    // Delete the parent of the open file1.
    fs.delete(dir, true);
    // Restart the namenode twice without reformatting.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // file1's deletion persisted; file2 remained (assertFalse over
    // assertTrue(!...)).
    assertFalse(fs.exists(file1));
    assertTrue(fs.exists(file2));
  } finally {
    // Guard: fs is still null if waitActive/getFileSystem throws; the old
    // unconditional fs.close() would then NPE and mask the real failure.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
BooleanVerifier
/**
 * This test creates three empty files and lets their leases expire.
 * This triggers release of the leases.
 * The empty files are supposed to be closed by that
 * without causing ConcurrentModificationException.
 */
@Test public void testLeaseExpireEmptyFiles() throws Exception {
  final Thread.UncaughtExceptionHandler previousHandler = Thread.getDefaultUncaughtExceptionHandler();
  // Record (rather than rethrow) any CME raised on a background thread.
  Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
    @Override public void uncaughtException(Thread t, Throwable e) {
      if (e instanceof ConcurrentModificationException) {
        LeaseManager.LOG.error("t=" + t, e);
        isConcurrentModificationException = true;
      }
    }
  });
  System.out.println("testLeaseExpireEmptyFiles start");
  final long leasePeriod = 1000;
  final int DATANODE_NUM = 3;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  try {
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Three empty files, created but never written to or closed here.
    for (String name : new String[] {"/foo", "/foo2", "/foo3"}) {
      TestFileCreation.createFile(dfs, new Path(name), DATANODE_NUM);
    }
    // Shrink the lease periods and wait long enough for expiry + release.
    cluster.setLeasePeriod(leasePeriod, leasePeriod);
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }
    assertFalse(isConcurrentModificationException);
  } finally {
    Thread.setDefaultUncaughtExceptionHandler(previousHandler);
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the fileLength when we sync the file and restart the cluster and
 * Datanodes not report to Namenode yet.
 */
@Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  HdfsDataInputStream in = null;
  try {
    Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSDataOutputStream out = dfs.create(path);
    int fileLength = 1030;
    // hsync (but do not close) so the length is durable on the datanodes.
    out.write(new byte[fileLength]);
    out.hsync();
    // After a namenode restart with datanodes registered, the hsynced
    // length must be visible to readers.
    cluster.restartNameNode();
    cluster.waitActive();
    in = (HdfsDataInputStream) dfs.open(path, 1024);
    Assert.assertEquals(fileLength, in.getVisibleLength());
    // Restart with NO datanodes: the namenode stays in safe mode and
    // opening the under-construction file must fail.
    cluster.shutdownDataNodes();
    cluster.restartNameNode(false);
    verifyNNIsInSafeMode(dfs);
    try {
      in = (HdfsDataInputStream) dfs.open(path);
      Assert.fail("Expected IOException");
    } catch (IOException e) {
      // String.contains() is clearer than indexOf(...) >= 0.
      Assert.assertTrue(e.getLocalizedMessage().contains("Name node is in safe mode"));
    }
  } finally {
    if (null != in) {
      in.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the FileStatus obtained calling listStatus on a file
 */
@Test public void testListStatusOnFile() throws IOException {
  // listStatus on a file returns a single-element array for that file.
  FileStatus[] stats = fs.listStatus(file1);
  assertEquals(1, stats.length);
  FileStatus status = stats[0];
  assertFalse(file1 + " should be a file", status.isDirectory());
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  assertEquals(fileSize, status.getLen());
  assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
  // Parameterized iterator: the raw RemoteIterator would not compile here,
  // since next() would return Object rather than FileStatus.
  RemoteIterator<FileStatus> itor = fc.listStatus(file1);
  status = itor.next();
  assertEquals(stats[0], status);
  assertFalse(file1 + " should be a file", status.isDirectory());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the FileStatus obtained calling getFileStatus on a file
 */
@Test public void testGetFileStatusOnFile() throws Exception {
  checkFile(fs, file1, 1);
  FileStatus stat = fs.getFileStatus(file1);
  // The status must describe a regular file with the expected attributes.
  assertFalse(file1 + " should be a file", stat.isDirectory());
  assertEquals(fileSize, stat.getLen());
  assertEquals(1, stat.getReplication());
  assertEquals(blockSize, stat.getBlockSize());
  Path qualified = file1.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  assertEquals(qualified.toString(), stat.getPath().toString());
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test getting a FileStatus object using a non-existant path
 */
@Test public void testGetFileStatusOnNonExistantFileDir() throws IOException {
  Path dir = new Path("/test/mkdirs");
  // listStatus via FileSystem must throw with the full-path message.
  try {
    fs.listStatus(dir);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException e) {
    assertEquals("File " + dir + " does not exist.", e.getMessage());
  }
  // listStatus via FileContext behaves the same way.
  try {
    fc.listStatus(dir);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException e) {
    assertEquals("File " + dir + " does not exist.", e.getMessage());
  }
  // getFileStatus throws with a "File does not exist"-prefixed message.
  try {
    fs.getFileStatus(dir);
    fail("getFileStatus of non-existent path should fail");
  } catch (FileNotFoundException e) {
    assertTrue("Exception doesn't indicate non-existant path", e.getMessage().startsWith("File does not exist"));
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test calling getFileInfo directly on the client
 */
@Test public void testGetFileInfo() throws IOException {
  Path path = new Path("/");
  assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
  // A non-existent file yields null (not an exception).
  HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
  // assertNull is the direct form of assertEquals(msg, null, x).
  assertNull("Non-existant file should result in null", fileInfo);
  // A directory with one child reports childrenNum == 1; a file reports 0.
  Path path1 = new Path("/name1");
  Path path2 = new Path("/name1/name2");
  assertTrue(fs.mkdirs(path1));
  FSDataOutputStream out = fs.create(path2, false);
  out.close();
  fileInfo = dfsClient.getFileInfo(path1.toString());
  assertEquals(1, fileInfo.getChildrenNum());
  fileInfo = dfsClient.getFileInfo(path2.toString());
  assertEquals(0, fileInfo.getChildrenNum());
  // Relative paths are rejected by the namenode.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name", re.toString().contains("Invalid file name"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test FileStatus objects obtained from a directory
 */
@Test public void testGetFileStatusOnDir() throws Exception {
  Path dir = new Path("/test/mkdirs");
  assertTrue("mkdir failed", fs.mkdirs(dir));
  assertTrue("mkdir failed", fs.exists(dir));
  // Fresh directory: zero length, correct qualified path, empty listings.
  FileStatus status = fs.getFileStatus(dir);
  assertTrue(dir + " should be a directory", status.isDirectory());
  assertTrue(dir + " should be zero size ", status.getLen() == 0);
  assertEquals(dir.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
  FileStatus[] stats = fs.listStatus(dir);
  assertEquals(dir + " should be empty", 0, stats.length);
  assertEquals(dir + " should be zero size ", 0, fs.getContentSummary(dir).getLength());
  // Parameterized iterator: a raw RemoteIterator would not compile where
  // next().getPath() is called below.
  RemoteIterator<FileStatus> itor = fc.listStatus(dir);
  assertFalse(dir + " should be empty", itor.hasNext());
  // Add one quarter-block file and verify its status.
  Path file2 = new Path(dir, "filestatus2.dat");
  DFSTestUtil.createFile(fs, file2, blockSize / 4, blockSize / 4, blockSize, (short) 1, seed);
  checkFile(fs, file2, 1);
  status = fs.getFileStatus(file2);
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  file2 = fs.makeQualified(file2);
  assertEquals(file2.toString(), status.getPath().toString());
  // Add a second file; the directory size is now two quarter-blocks.
  Path file3 = new Path(dir, "filestatus3.dat");
  DFSTestUtil.createFile(fs, file3, blockSize / 4, blockSize / 4, blockSize, (short) 1, seed);
  checkFile(fs, file3, 1);
  file3 = fs.makeQualified(file3);
  final int expected = blockSize / 2;
  assertEquals(dir + " size should be " + expected, expected, fs.getContentSummary(dir).getLength());
  // Both listing APIs return the entries sorted by name: file2, file3.
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have two entries", 2, stats.length);
  assertEquals(file2.toString(), stats[0].getPath().toString());
  assertEquals(file3.toString(), stats[1].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse("Unexpected addtional file", itor.hasNext());
  // Add a subdirectory; by name order it sorts before the two files.
  Path dir3 = fs.makeQualified(new Path(dir, "dir3"));
  fs.mkdirs(dir3);
  dir3 = fs.makeQualified(dir3);
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have three entries", 3, stats.length);
  assertEquals(dir3.toString(), stats[0].getPath().toString());
  assertEquals(file2.toString(), stats[1].getPath().toString());
  assertEquals(file3.toString(), stats[2].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(dir3.toString(), itor.next().getPath().toString());
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse("Unexpected addtional file", itor.hasNext());
  // Two more subdirectories; listings now hold five name-ordered entries.
  Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
  fs.mkdirs(dir4);
  dir4 = fs.makeQualified(dir4);
  Path dir5 = fs.makeQualified(new Path(dir, "dir5"));
  fs.mkdirs(dir5);
  dir5 = fs.makeQualified(dir5);
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have five entries", 5, stats.length);
  assertEquals(dir3.toString(), stats[0].getPath().toString());
  assertEquals(dir4.toString(), stats[1].getPath().toString());
  assertEquals(dir5.toString(), stats[2].getPath().toString());
  assertEquals(file2.toString(), stats[3].getPath().toString());
  assertEquals(file3.toString(), stats[4].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(dir3.toString(), itor.next().getPath().toString());
  assertEquals(dir4.toString(), itor.next().getPath().toString());
  assertEquals(dir5.toString(), itor.next().getPath().toString());
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse(itor.hasNext());
  fs.delete(dir, true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify datanode port usage.
 */
@Test public void testDataNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration dnConf = new HdfsConfiguration(config);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
    // Attempt 1: transfer address colliding with the namenode RPC address.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, FileSystem.getDefaultUri(config).getAuthority());
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    assertFalse(canStartDataNode(dnConf));
    // Attempt 2: HTTP address colliding with the namenode HTTP address.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    assertFalse(canStartDataNode(dnConf));
    // Attempt 3: all addresses on this host's free ports — must start.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
    assertTrue(canStartDataNode(dnConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify BackupNode port usage: starting a backup node on the namenode's
 * HTTP port must fail, while a free port must succeed.
 */
@Test public void testBackupNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration backupConf = new HdfsConfiguration(config);
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
    // Point the backup node's HTTP server at the port the namenode already owns.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
        backupConf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: "
        + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    assertFalse("Backup started on same port as Namenode",
        canStartBackupNode(backupConf));
    // Move the HTTP server to a free port; startup must now succeed.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: "
        + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    assertTrue("Backup Namenode should've started", canStartBackupNode(backupConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify secondary namenode port usage: it must not start on the primary
 * namenode's HTTP port, but must start on a free port.
 */
@Test public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration snConf = new HdfsConfiguration(config);
    // First attempt deliberately reuses the primary namenode's HTTP address.
    snConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: "
        + snConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertFalse(canStartSecondaryNode(snConf));
    // Second attempt binds to a free port and must succeed.
    snConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: "
        + snConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertTrue(canStartSecondaryNode(snConf));
  } finally {
    stopNameNode(nn);
  }
}
BooleanVerifier
/**
 * Verify that hflush()/close() handle a pending thread interrupt correctly:
 * an interrupted writer either sees InterruptedIOException or completes with
 * the interrupt status still set, and a subsequent retry succeeds, leaving
 * the file fully intact.
 *
 * Fix: Thread.interrupted() is a static method; the original called it as
 * Thread.currentThread().interrupted(), which reads as an instance query but
 * in fact clears the current thread's interrupt flag regardless of the
 * receiver. Calling it statically makes the check-and-clear semantics explicit
 * (behavior is unchanged).
 */
@Test public void testHFlushInterrupted() throws Exception {
  final int DATANODE_NUM = 2;
  final int fileLen = 6;
  byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
  Configuration conf = new HdfsConfiguration();
  final Path p = new Path("/hflush-interrupted");
  System.out.println("p=" + p);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
    stm.write(fileContents, 0, 2);
    // Interrupt ourselves so the flush observes a pending interrupt.
    Thread.currentThread().interrupt();
    try {
      stm.hflush();
      // If hflush() completed despite the interrupt, the flag must still be
      // set; Thread.interrupted() verifies that and clears it.
      assertTrue(Thread.interrupted());
    }
    catch ( InterruptedIOException ie) {
      System.out.println("Got expected exception during flush");
    }
    // Whichever path was taken, the interrupt status must now be clear so the
    // retried flush below can proceed normally.
    assertFalse(Thread.interrupted());
    stm.hflush();
    stm.write(fileContents, 2, 2);
    stm.hflush();
    stm.write(fileContents, 4, 2);
    // Now exercise the same scenario on close().
    Thread.currentThread().interrupt();
    try {
      stm.close();
      assertTrue(Thread.interrupted());
    }
    catch ( InterruptedIOException ioe) {
      System.out.println("Got expected exception during close");
      // Clear any remaining interrupt, then retry the close.
      assertFalse(Thread.interrupted());
      stm.close();
    }
    // All writes must have survived the interrupted flush/close sequence.
    AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to deal with thread interruptions");
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can set and clear both the namespace quota and the space
 * quota via {@link HdfsAdmin}, observing each change through
 * {@code getContentSummary} (-1 means "no quota set").
 */
@Test public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fileSys = null;
  try {
    fileSys = FileSystem.get(conf);
    assertTrue(fileSys.mkdirs(TEST_PATH));
    // Fresh directory: neither quota is set.
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the space quota must leave the namespace quota untouched.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Now both quotas are set.
    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Clearing the space quota must leave the namespace quota in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Finally both quotas are cleared again.
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
  }
}
APIUtilityVerifier BooleanVerifier
@Test public void testClientDatanodeProtocol() throws IOException {
  // The namenode does not serve ClientDatanodeProtocol, so the probe fails.
  ClientDatanodeProtocolTranslatorPB nnTranslator =
      new ClientDatanodeProtocolTranslatorPB(nnAddress,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf));
  assertFalse(nnTranslator.isMethodSupported("refreshNamenodes"));
  // Against a real datanode the same method is supported.
  ClientDatanodeProtocolTranslatorPB dnTranslator =
      new ClientDatanodeProtocolTranslatorPB(dnAddress,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf));
  assertTrue(dnTranslator.isMethodSupported("refreshNamenodes"));
}
APIUtilityVerifier BooleanVerifier
// Probes JournalProtocol support against the active namenode; startLogSegment
// is served by journal/backup nodes, not the namenode, so it must be reported
// as unsupported.
// NOTE(review): method name is missing a 't' ("tesJournalProtocol"); left
// unchanged because renaming would alter the externally visible test id.
@Test public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator=(JournalProtocolTranslatorPB)NameNodeProxies.createNonHAProxy(conf,nnAddress,JournalProtocol.class,UserGroupInformation.getCurrentUser(),true).getProxy();
assertFalse(translator.isMethodSupported("startLogSegment"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testRefreshCallQueueProtocol() throws IOException {
  // The namenode implements RefreshCallQueueProtocol, so the probe succeeds.
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      RefreshCallQueueProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  RefreshCallQueueProtocolClientSideTranslatorPB translator =
      (RefreshCallQueueProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshCallQueue"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testInterDatanodeProtocol() throws IOException {
  // InterDatanodeProtocol is only served between datanodes; the namenode
  // must report the method as unsupported.
  InterDatanodeProtocolTranslatorPB nnTranslator =
      new InterDatanodeProtocolTranslatorPB(nnAddress,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf), 0);
  assertFalse(nnTranslator.isMethodSupported("initReplicaRecovery"));
  // A real datanode supports it.
  InterDatanodeProtocolTranslatorPB dnTranslator =
      new InterDatanodeProtocolTranslatorPB(dnAddress,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf), 0);
  assertTrue(dnTranslator.isMethodSupported("initReplicaRecovery"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testRefreshUserMappingsProtocol() throws IOException {
  // The namenode serves RefreshUserMappingsProtocol directly.
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      RefreshUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  RefreshUserMappingsProtocolClientSideTranslatorPB translator =
      (RefreshUserMappingsProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshUserToGroupsMappings"));
}
BooleanVerifier
@Test public void testDatanodeProtocol() throws IOException {
  // sendHeartbeat is part of DatanodeProtocol, which the namenode serves.
  DatanodeProtocolClientSideTranslatorPB nnTranslator =
      new DatanodeProtocolClientSideTranslatorPB(nnAddress, conf);
  assertTrue(nnTranslator.isMethodSupported("sendHeartbeat"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testRefreshAuthorizationPolicyProtocol() throws IOException {
  // The namenode serves RefreshAuthorizationPolicyProtocol directly.
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      RefreshAuthorizationPolicyProtocol.class,
      UserGroupInformation.getCurrentUser(), true).getProxy();
  RefreshAuthorizationPolicyProtocolClientSideTranslatorPB translator =
      (RefreshAuthorizationPolicyProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshServiceAcl"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testNamenodeProtocol() throws IOException {
  NamenodeProtocol np = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true)
      .getProxy();
  long version = RPC.getProtocolVersion(NamenodeProtocolPB.class);
  // A genuine protocol method is reported as supported...
  assertTrue(RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class,
      RPC.RpcKind.RPC_PROTOCOL_BUFFER, version, "rollEditLog"));
  // ...while an unknown method name is not.
  assertFalse(RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class,
      RPC.RpcKind.RPC_PROTOCOL_BUFFER, version, "bogusMethod"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testGetUserMappingsProtocol() throws IOException {
  // The namenode serves GetUserMappingsProtocol directly.
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  GetUserMappingsProtocolClientSideTranslatorPB translator =
      (GetUserMappingsProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("getGroupsForUser"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the LeaseRenewer factory: clients created under the same UGI share
// one LeaseRenewer instance, while clients under different UGIs get distinct
// renewers. The output streams opened below are deliberately left open so each
// client holds a lease and therefore has an active renewer.
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
final String[] groups=new String[]{"supergroup"};
final UserGroupInformation[] ugi=new UserGroupInformation[3];
// Three distinct test users, all in the same group.
for (int i=0; i < ugi.length; i++) {
ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
}
// Stub the mock namenode protocol (mcp) so getFileInfo/create return a
// plausible file status instead of null.
// NOTE(review): new FsPermission((short)777) is decimal 777, not octal 0777 --
// presumably harmless for a mock return value, but worth confirming.
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
final Configuration conf=new Configuration();
// Two clients for ugi[0]: they must share a single renewer.
final DFSClient c1=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out1=createFsOut(c1,"/out1");
final DFSClient c2=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out2=createFsOut(c2,"/out2");
Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
// A client for ugi[1] must get a different renewer than ugi[0]'s clients.
final DFSClient c3=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out3=createFsOut(c3,"/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
// A second ugi[1] client shares ugi[1]'s renewer.
final DFSClient c4=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out4=createFsOut(c4,"/out4");
Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
// ugi[2] gets a renewer distinct from both previous users.
final DFSClient c5=createDFSClientAs(ugi[2],conf);
FSDataOutputStream out5=createFsOut(c5,"/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Verifies client behavior when lease renewal fails persistently:
// - within the soft-limit window writes still succeed;
// - past the hard limit the client's lease machinery aborts, subsequent
//   writes fail, and the renewer empties;
// - reads and brand-new writes still work once renewal is healthy again.
// The spied namenode is used to inject renewLease failures.
@Test public void testLeaseAbort() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
// Spy on the namenode RPC so renewLease can be made to throw.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient dfs=new DFSClient(null,spyNN,conf,null);
byte[] buf=new byte[1024];
// File "c": fully written and closed, then reopened for reading later.
FSDataOutputStream c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
DFSInputStream c_in=dfs.open(dirString + "c");
// File "d": kept open for writing throughout the renewal failures.
FSDataOutputStream d_out=createFsOut(dfs,dirString + "d");
// From now on every renewLease() call fails with an InvalidToken.
doThrow(new RemoteException(InvalidToken.class.getName(),"Your token is worthless")).when(spyNN).renewLease(anyString());
LeaseRenewer originalRenewer=dfs.getLeaseRenewer();
// Pretend the last successful renewal was just past the soft limit.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
try {
dfs.renewLease();
}
catch ( IOException e) {
}
// Soft-limit expiry alone must not abort the client's open streams.
try {
d_out.write(buf,0,1024);
LOG.info("Write worked beyond the soft limit as expected.");
}
catch ( IOException e) {
Assert.fail("Write failed.");
}
// Now simulate hard-limit expiry; this renewal failure is fatal.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.renewLease();
try {
d_out.write(buf,0,1024);
d_out.close();
Assert.fail("Write did not fail even after the fatal lease renewal failure");
}
catch ( IOException e) {
LOG.info("Write failed as expected. ",e);
}
// Give the renewer a moment to abort its clients, then it must be empty.
Thread.sleep(1000);
Assert.assertTrue(originalRenewer.isEmpty());
// Restore healthy renewal and confirm reads still work...
doNothing().when(spyNN).renewLease(anyString());
try {
int num=c_in.read(buf,0,1);
if (num != 1) {
Assert.fail("Failed to read 1 byte");
}
c_in.close();
}
catch ( IOException e) {
LOG.error("Read failed with ",e);
Assert.fail("Read after lease renewal failure failed");
}
// ...and that a fresh write (new lease) succeeds as well.
try {
c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
}
catch ( IOException e) {
LOG.error("Write failed with ",e);
Assert.fail("Write failed");
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts. This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test public void testLeaseAfterRenameAndRecreate() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final Path path1=new Path("/test-file");
final String contents1="contents1";
final Path path2=new Path("/test-file-new-location");
final String contents2="contents2";
// Open path1 for write and leave it open so its lease stays active.
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out1=fs.create(path1);
out1.writeBytes(contents1);
Assert.assertTrue(hasLease(cluster,path1));
Assert.assertEquals(1,leaseCount(cluster));
// From a second client, rename the under-construction file away and
// create a brand-new file at the old location.
// NOTE(review): fs2 (and fs3 below) are never closed; presumably fine for
// a test but worth confirming no FileSystem instances leak across tests.
DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
fs2.rename(path1,path2);
FSDataOutputStream out2=fs2.create(path1);
out2.writeBytes(contents2);
out2.close();
// The original lease must have followed the renamed file.
Assert.assertTrue(hasLease(cluster,path2));
out1.close();
// Verify the original contents live at path2 and the new at path1.
DistributedFileSystem fs3=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
Assert.assertEquals(contents1,DFSTestUtil.readFile(fs3,path2));
Assert.assertEquals(contents2,DFSTestUtil.readFile(fs3,path1));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that the lease on an open file follows the file through a series
// of renames: renaming the file itself, renaming its parent directory, and
// renames with Options.Rename.OVERWRITE. After every step exactly one lease
// exists and it is attached to the file's current path.
//
// Fix: the assertFalse at the "rename parent again" step carried the message
// "no lease for ..." -- but assertFalse(hasLease(...)) asserts the ABSENCE of
// a lease, so on failure the message was backwards. It now reads
// "has lease for ...", matching the parallel assertions in this test.
@Test public void testLeaseAfterRename() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
Path p=new Path("/test-file");
Path d=new Path("/test-d");
Path d2=new Path("/test-d-other");
// Open p for write and keep it open so it holds a lease throughout.
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out=fs.create(p);
out.writeBytes("something");
Assert.assertTrue(hasLease(cluster,p));
Assert.assertEquals(1,leaseCount(cluster));
DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
LOG.info("DMS: rename file into dir");
Path pRenamed=new Path(d,p.getName());
fs2.mkdirs(d);
fs2.rename(p,pRenamed);
Assert.assertFalse(p + " exists",fs2.exists(p));
Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed));
Assert.assertFalse("has lease for " + p,hasLease(cluster,p));
Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent dir");
Path pRenamedAgain=new Path(d2,pRenamed.getName());
fs2.rename(d,d2);
Assert.assertFalse(d + " exists",fs2.exists(d));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent again");
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName());
fs2.mkdirs(d);
fs2.rename(d2,d);
Assert.assertFalse(d2 + " exists",fs2.exists(d2));
// Fixed message: was "no lease for", contradicting the assertFalse below.
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// Rename the parent with OVERWRITE onto d2.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d2,p.getName());
fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE);
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// And back onto d, again with OVERWRITE.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d,p.getName());
fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE);
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
out.close();
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
// Verifies basic lease lifecycle: a lease exists exactly while a file is open
// for write, appears per-file, and disappears on close.
//
// Fix: replaced the Assert.assertTrue(!cond) pattern with Assert.assertFalse(cond)
// (three occurrences) -- identical behavior, clearer intent and failure output.
@Test public void testLease() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs=cluster.getFileSystem();
Assert.assertTrue(fs.mkdirs(dir));
Path a=new Path(dir,"a");
Path b=new Path(dir,"b");
// Opening "a" creates its lease; "b" has none yet.
DataOutputStream a_out=fs.create(a);
a_out.writeBytes("something");
Assert.assertTrue(hasLease(cluster,a));
Assert.assertFalse(hasLease(cluster,b));
// Opening "b" adds a second, independent lease.
DataOutputStream b_out=fs.create(b);
b_out.writeBytes("something");
Assert.assertTrue(hasLease(cluster,a));
Assert.assertTrue(hasLease(cluster,b));
// Closing both streams releases both leases.
a_out.close();
b_out.close();
Assert.assertFalse(hasLease(cluster,a));
Assert.assertFalse(hasLease(cluster,b));
fs.delete(dir,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 * It also verifies that lease recovery cannot complete while the namenode is
 * in safe mode.
 */
@Test public void testBlockSynchronization() throws Exception {
final int ORG_FILE_SIZE=3000;
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
// Create a multi-block file fully replicated to REPLICATION_NUM datanodes.
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L);
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM);
// Locate the last block and the datanodes holding its replicas.
LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr);
DatanodeInfo[] datanodeinfos=locatedblock.getLocations();
assertEquals(REPLICATION_NUM,datanodeinfos.length);
DataNode[] datanodes=new DataNode[REPLICATION_NUM];
for (int i=0; i < REPLICATION_NUM; i++) {
datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort());
assertTrue(datanodes[i] != null);
}
ExtendedBlock lastblock=locatedblock.getBlock();
DataNode.LOG.info("newblocks=" + lastblock);
for (int i=0; i < REPLICATION_NUM; i++) {
checkMetaInfo(lastblock,datanodes[i]);
}
// Reopen for append (without writing) and wait for lease recovery to
// run block synchronization on the last block.
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName);
waitLeaseRecovery(cluster);
// After recovery every replica must agree on block id, length and the
// new generation stamp.
Block[] updatedmetainfo=new Block[REPLICATION_NUM];
long oldSize=lastblock.getNumBytes();
lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock();
long currentGS=lastblock.getGenerationStamp();
for (int i=0; i < REPLICATION_NUM; i++) {
updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId());
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId());
assertEquals(oldSize,updatedmetainfo[i].getNumBytes());
assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp());
}
// While in safe mode, lease recovery must NOT release the lease: the
// lease for the newly created open file must still exist afterwards.
System.out.println("Testing that lease recovery cannot happen during safemode.");
filestr="/foo.safemode";
filepath=new Path(filestr);
dfs.create(filepath,(short)1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs,filepath,(short)1);
waitLeaseRecovery(cluster);
LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Block Recovery when the meta file not having crcs for all chunks in block
 * file: the test truncates the replica's meta file, restarts the datanode,
 * and verifies that recoverLease() can still close the file.
 *
 * Fix: the MiniDFSCluster was never shut down, leaking its daemons and disk
 * resources into subsequent tests; the body is now wrapped in try/finally.
 */
@Test public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf=new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,UserGroupInformation.getCurrentUser().getShortUserName());
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    // Write ~2MB, sync it, then abort the stream so the file stays
    // under-construction with an open lease.
    Path file=new Path("/testRecoveryFile");
    DistributedFileSystem dfs=cluster.getFileSystem();
    FSDataOutputStream out=dfs.create(file);
    int count=0;
    while (count < 2 * 1024 * 1024) {
      out.writeBytes("Data");
      count+=4;
    }
    out.hsync();
    ((DFSOutputStream)out.getWrappedStream()).abort();
    // Truncate the replica's meta file so it lacks CRCs for the last chunks.
    LocatedBlocks locations=cluster.getNameNodeRpc().getBlockLocations(file.toString(),0,count);
    ExtendedBlock block=locations.get(0).getBlock();
    DataNode dn=cluster.getDataNodes().get(0);
    BlockLocalPathInfo localPathInfo=dn.getBlockLocalPathInfo(block,null);
    File metafile=new File(localPathInfo.getMetaPath());
    assertTrue(metafile.exists());
    RandomAccessFile raf=new RandomAccessFile(metafile,"rw");
    raf.setLength(metafile.length() - 20);
    raf.close();
    // Restart the datanode so it re-reads the (shortened) meta file.
    DataNodeProperties dnProp=cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp,true);
    // Lease recovery must still succeed despite the missing CRCs.
    DistributedFileSystem newdfs=(DistributedFileSystem)FileSystem.newInstance(cluster.getConfiguration(0));
    count=0;
    while (++count < 10 && !newdfs.recoverLease(file)) {
      Thread.sleep(1000);
    }
    assertTrue("File should be closed",newdfs.recoverLease(file));
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the hard lease expiration period to be short 1s. Thus triggering
 * lease expiration to happen while the client is still alive.
 * The test makes sure that the lease recovery completes and the client
 * fails if it continues to write to the file.
 * @throws Exception
 */
@Test public void testHardLeaseRecovery() throws Exception {
String filestr="/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// Write a random amount of data and flush it to the pipeline.
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Kill the client's lease renewer so the lease is never renewed.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// Shorten the hard limit so expiry (and recovery) happens quickly.
cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD);
// Poll until lease recovery has closed the file (no longer
// under construction).
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size);
}
while (locatedBlocks.isUnderConstruction());
assertEquals(size,locatedBlocks.getFileLength());
// The original writer's stream must now be dead: further writes/close fail.
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
}
catch ( IOException e) {
e.printStackTrace();
}
// Finally verify the recovered file's contents on all datanodes.
AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the soft lease expiration period to be short 1s. Thus triggering
 * soft lease expiration to happen immediately by having another client
 * trying to create the same file.
 * The test makes sure that the lease recovery completes.
 *
 * Fix: the fake group map used raw {@code Map}/{@code HashMap} types; it is
 * now parameterized as {@code Map<String, String[]>} (no behavior change,
 * removes unchecked warnings).
 * @throws Exception
 */
@Test public void testSoftLeaseRecovery() throws Exception {
Map<String,String[]> u2g_map=new HashMap<String,String[]>(1);
u2g_map.put(fakeUsername,new String[]{fakeGroup});
DFSTestUtil.updateConfWithFakeGroupMapping(conf,u2g_map);
// Start with normal lease periods while the file is written.
cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,HdfsConstants.LEASE_HARDLIMIT_PERIOD);
String filestr="/foo" + AppendTestUtil.nextInt();
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// Write some data and flush it to the pipeline.
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Kill the lease renewer, then shorten the soft limit so another client's
// create() attempt triggers soft-lease recovery.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
cluster.setLeasePeriod(SHORT_LEASE_PERIOD,LONG_LEASE_PERIOD);
{
// A second user repeatedly tries to create the same path. While recovery
// is in progress it sees AlreadyBeingCreatedException; once recovery has
// closed the file it sees FileAlreadyExistsException, which ends the loop.
UserGroupInformation ugi=UserGroupInformation.createUserForTesting(fakeUsername,new String[]{fakeGroup});
FileSystem dfs2=DFSTestUtil.getFileSystemAs(ugi,conf);
boolean done=false;
for (int i=0; i < 10 && !done; i++) {
AppendTestUtil.LOG.info("i=" + i);
try {
dfs2.create(filepath,false,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
fail("Creation of an existing file should never succeed.");
}
catch ( FileAlreadyExistsException ex) {
done=true;
}
catch ( AlreadyBeingCreatedException ex) {
AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
}
catch ( IOException ioe) {
AppendTestUtil.LOG.warn("UNEXPECTED IOException",ioe);
}
if (!done) {
AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
}
}
assertTrue(done);
}
// Recovery completed: verify the file length and contents survived.
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "+ "Validating its contents now...");
long fileSize=dfs.getFileStatus(filepath).getLen();
assertTrue("File should be " + size + " bytes, but is actually "+ " found to be "+ fileSize+ " bytes",fileSize == size);
AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testThreadName() throws Exception {
  DFSOutputStream stream = Mockito.mock(DFSOutputStream.class);
  final long inodeId = 789L;
  Assert.assertFalse("Renewer not initially running", renewer.isRunning());
  // Registering a file starts the renewer daemon.
  renewer.put(inodeId, stream, MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running", renewer.isRunning());
  // The daemon thread name encodes the client's user and namenode URI.
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", renewer.getDaemonName());
  // Removing the last file lets the daemon wind down; poll until it stops
  // (bounded at five seconds so a stuck daemon fails the test).
  renewer.closeFile(inodeId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());
  final long deadline = Time.now() + 5000;
  while (renewer.isRunning() && Time.now() < deadline) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a file: listFiles must return exactly one entry --
 * the file itself -- regardless of the recursive flag, and the two
 * invocations below run in the same order as before (recursive first).
 */
@Test public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);
  for (boolean recursive : new boolean[]{true, false}) {
    RemoteIterator itor = fc.util().listFiles(FILE1, recursive);
    LocatedFileStatus stat = itor.next();
    // Single entry describing the file, with its length, qualified path,
    // and one block location.
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fc.makeQualified(FILE1), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a directory: empty directories yield no entries,
 * recursive listing surfaces files in subdirectories, and non-recursive
 * listing returns only direct children that are files.
 */
@Test public void testDirectory() throws IOException {
fc.mkdir(DIR1,FsPermission.getDefault(),true);
// An empty directory lists nothing, recursively or not.
RemoteIterator itor=fc.util().listFiles(DIR1,true);
assertFalse(itor.hasNext());
itor=fc.util().listFiles(DIR1,false);
assertFalse(itor.hasNext());
// One file inside DIR1: both modes return exactly that file.
writeFile(fc,FILE2,FILE_LEN);
itor=fc.util().listFiles(DIR1,true);
LocatedFileStatus stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fc.makeQualified(FILE2),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
itor=fc.util().listFiles(DIR1,false);
stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fc.makeQualified(FILE2),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
// Add FILE1 (directly under TEST_DIR) and FILE3 (in a subdirectory).
writeFile(fc,FILE1,FILE_LEN);
writeFile(fc,FILE3,FILE_LEN);
// Recursive listing of TEST_DIR yields all three files; the expected order
// (FILE2, FILE3, FILE1) reflects the iterator's traversal order --
// presumably subdirectory contents before direct children here; confirm
// against the FileContext.Util#listFiles contract if this ever changes.
itor=fc.util().listFiles(TEST_DIR,true);
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE2),stat.getPath());
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE3),stat.getPath());
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1),stat.getPath());
assertFalse(itor.hasNext());
// Non-recursive listing sees only the file directly under TEST_DIR.
itor=fc.util().listFiles(TEST_DIR,false);
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1),stat.getPath());
assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input patch has a symbolic links as its children: listFiles must
 * follow symlinks to both a directory (dir5 -> DIR1) and a file
 * (file4 -> FILE1), reporting the link targets' files.
 */
@Test public void testSymbolicLinks() throws IOException {
writeFile(fc,FILE1,FILE_LEN);
writeFile(fc,FILE2,FILE_LEN);
writeFile(fc,FILE3,FILE_LEN);
// dir4 contains a symlink to DIR1 (dir5) and a symlink to FILE1 (file4).
Path dir4=new Path(TEST_DIR,"dir4");
Path dir5=new Path(dir4,"dir5");
Path file4=new Path(dir4,"file4");
fc.createSymlink(DIR1,dir5,true);
fc.createSymlink(FILE1,file4,true);
// Recursive listing follows both links: DIR1's files (FILE2, FILE3) come
// through dir5, and FILE1 through file4.
RemoteIterator itor=fc.util().listFiles(dir4,true);
LocatedFileStatus stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE2),stat.getPath());
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE3),stat.getPath());
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1),stat.getPath());
assertFalse(itor.hasNext());
// Non-recursive listing only resolves the file symlink (file4 -> FILE1).
itor=fc.util().listFiles(dir4,false);
stat=itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1),stat.getPath());
assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests get/set working directory in DFS: files created with relative paths
 * resolve against the current working directory, for both absolute and
 * relative setWorkingDirectory() arguments, and the home directory is
 * /user/&lt;username&gt;.
 */
@Test(timeout=20000) public void testWorkingDirectory() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys=cluster.getFileSystem();
try {
// The initial working directory must be absolute.
Path orig_path=fileSys.getWorkingDirectory();
assertTrue(orig_path.isAbsolute());
// A relative file path resolves against the working directory.
Path file1=new Path("somewhat/random.txt");
writeFile(fileSys,file1);
assertTrue(fileSys.exists(new Path(orig_path,file1.toString())));
fileSys.delete(file1,true);
// Absolute setWorkingDirectory: the same relative path now lands there.
Path subdir1=new Path("/somewhere");
fileSys.setWorkingDirectory(subdir1);
writeFile(fileSys,file1);
cleanupFile(fileSys,new Path(subdir1,file1.toString()));
// Relative setWorkingDirectory resolves against the previous one,
// i.e. the working directory becomes /somewhere/else.
Path subdir2=new Path("else");
fileSys.setWorkingDirectory(subdir2);
writeFile(fileSys,file1);
readFile(fileSys,file1);
cleanupFile(fileSys,new Path(new Path(subdir1,subdir2.toString()),file1.toString()));
// Home directory is /user/<username>, fully qualified.
Path home=fileSys.makeQualified(new Path("/user/" + getUserName(fileSys)));
Path fsHome=fileSys.getHomeDirectory();
assertEquals(home,fsHome);
}
finally {
fileSys.close();
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Bring up two clusters and assert that they are in different directories:
 * each cluster's data directory must live under its own configured
 * {@code HDFS_MINIDFS_BASEDIR}.
 *
 * Fix: replaced {@code assertTrue(msg, !a.equals(b))} with the equivalent
 * {@code assertFalse(msg, a.equals(b))} for clarity (no behavior change).
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testDualClusters() throws Throwable {
  File testDataCluster2=new File(testDataPath,CLUSTER_2);
  File testDataCluster3=new File(testDataPath,CLUSTER_3);
  Configuration conf=new HdfsConfiguration();
  // Cluster 2 is rooted at its own base directory.
  String c2Path=testDataCluster2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,c2Path);
  MiniDFSCluster cluster2=new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster cluster3=null;
  try {
    String dataDir2=cluster2.getDataDirectory();
    assertEquals(new File(c2Path + "/data"),new File(dataDir2));
    // Rebase the same Configuration for cluster 3 before building it.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,testDataCluster3.getAbsolutePath());
    MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf);
    cluster3=builder.build();
    String dataDir3=cluster3.getDataDirectory();
    // The two clusters must not share a data directory.
    assertFalse("Clusters are bound to the same directory: " + dataDir2,dataDir2.equals(dataDir3));
  }
  finally {
    MiniDFSCluster.shutdownCluster(cluster3);
    MiniDFSCluster.shutdownCluster(cluster2);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts all replicas of one block and verifies that the missing-blocks
 * count (via DFS client, BlockManager, and the NameNodeInfo MXBean) rises,
 * then returns to zero once the corrupt file is deleted.
 */
@Test public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
// Aggressive replication monitor + short client retry window so the
// test observes state changes quickly.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,0);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
int fileLen=10 * 1024;
// Two blocks per file.
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,fileLen / 2);
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final BlockManager bm=cluster.getNamesystem().getBlockManager();
DistributedFileSystem dfs=cluster.getFileSystem();
DFSTestUtil.createFile(dfs,new Path("/testMissingBlocksAlert/file1"),fileLen,(short)3,0);
Path corruptFile=new Path("/testMissingBlocks/corruptFile");
DFSTestUtil.createFile(dfs,corruptFile,fileLen,(short)3,0);
// Corrupt the only replica of the first block (single-DN cluster).
ExtendedBlock block=DFSTestUtil.getFirstBlock(dfs,corruptFile);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
// Read the file so the client reports the corruption to the NN.
FSDataInputStream in=dfs.open(corruptFile);
try {
in.readFully(new byte[fileLen]);
}
catch ( ChecksumException ignored) {
// Expected: the replica was corrupted above.
}
finally {
// Close in finally so an unexpected IOException cannot leak the stream.
in.close();
}
LOG.info("Waiting for missing blocks count to increase...");
while (dfs.getMissingBlocksCount() <= 0) {
Thread.sleep(100);
}
assertEquals(1,dfs.getMissingBlocksCount());
assertEquals(4,dfs.getUnderReplicatedBlocksCount());
assertEquals(3,bm.getUnderReplicatedNotMissingBlocks());
// The same count must be visible through the NameNodeInfo MXBean.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
assertEquals(1,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
// Deleting the corrupt file should clear the missing-block alert.
dfs.delete(corruptFile,true);
LOG.info("Waiting for missing blocks count to be zero...");
while (dfs.getMissingBlocksCount() > 0) {
Thread.sleep(100);
}
assertEquals(2,dfs.getUnderReplicatedBlocksCount());
assertEquals(2,bm.getUnderReplicatedNotMissingBlocks());
assertEquals(0,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime for
 * OP_CLOSE when reading from the edit log.
 * Creates a file, sleeps, closes it (which bumps mtime), restarts the NN so
 * the edit log is replayed, and verifies the post-close mtime survived.
 */
@Test public void testModTimePersistsAfterRestart() throws IOException {
final long sleepTime=10;
MiniDFSCluster cluster=null;
FileSystem fs=null;
Configuration conf=new HdfsConfiguration();
try {
cluster=new MiniDFSCluster.Builder(conf).build();
fs=cluster.getFileSystem();
Path testPath=new Path("/test");
OutputStream out=fs.create(testPath);
long initialModTime=fs.getFileStatus(testPath).getModificationTime();
assertTrue(initialModTime > 0);
// Sleep so the close() below lands at a strictly later timestamp.
ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
out.close();
// OP_CLOSE should advance mtime by at least the sleep interval.
long modTimeAfterClose=fs.getFileStatus(testPath).getModificationTime();
assertTrue(modTimeAfterClose >= initialModTime + sleepTime);
// Restart forces the NN to rebuild state from the edit log; the
// close-time mtime must be preserved (the HDFS-3864 bug dropped it).
cluster.restartNameNode();
long modTimeAfterRestart=fs.getFileStatus(testPath).getModificationTime();
assertEquals(modTimeAfterClose,modTimeAfterRestart);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests modification time in DFS.
 * Verifies that creating files updates the parent directory's mtime, that
 * rename preserves a file's own mtime while updating both directories, and
 * that delete updates only the containing directory.
 */
@Test public void testModTime() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
int replicas=numDatanodes - 1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1=new Path("testdir1");
Path file1=new Path(dir1,"test1.dat");
DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)replicas,seed);
FileStatus stat=fileSys.getFileStatus(file1);
long mtime1=stat.getModificationTime();
assertTrue(mtime1 != 0);
// Record the parent directory's mtime before adding another child.
stat=fileSys.getFileStatus(dir1);
long mdir1=stat.getModificationTime();
System.out.println("Creating testdir1/test2.dat.");
Path file2=new Path(dir1,"test2.dat");
DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,(short)replicas,seed);
stat=fileSys.getFileStatus(file2);
// Adding a child must not move the directory's mtime backwards.
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() >= mdir1);
mdir1=stat.getModificationTime();
Path dir2=fileSys.makeQualified(new Path("testdir2/"));
System.out.println("Creating testdir2 " + dir2);
assertTrue(fileSys.mkdirs(dir2));
stat=fileSys.getFileStatus(dir2);
long mdir2=stat.getModificationTime();
Path newfile=new Path(dir2,"testnew.dat");
System.out.println("Moving " + file1 + " to "+ newfile);
fileSys.rename(file1,newfile);
// Rename keeps the file's own mtime...
stat=fileSys.getFileStatus(newfile);
assertTrue(stat.getModificationTime() == mtime1);
// ...but updates both the source and destination directories.
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() != mdir1);
mdir1=stat.getModificationTime();
stat=fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2=stat.getModificationTime();
System.out.println("Deleting testdir2/testnew.dat.");
assertTrue(fileSys.delete(newfile,true));
// Delete touches only the directory that contained the file.
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() == mdir1);
stat=fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2=stat.getModificationTime();
cleanupFile(fileSys,file2);
cleanupFile(fileSys,dir1);
cleanupFile(fileSys,dir2);
}
catch ( IOException e) {
// Dump the datanode report to aid debugging, then rethrow.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Verifies LRU eviction in PeerCache: inserting one peer beyond capacity
 * evicts the oldest entry while the rest remain retrievable and open.
 */
@Test public void testEviction() throws Exception {
final int CAPACITY=3;
PeerCache cache=new PeerCache(CAPACITY,100000);
DatanodeID dnIds[]=new DatanodeID[CAPACITY + 1];
FakePeer peers[]=new FakePeer[CAPACITY + 1];
// Distinct datanode ids so each peer occupies its own cache key.
for (int i=0; i < dnIds.length; ++i) {
dnIds[i]=new DatanodeID("192.168.0.1","fakehostname_" + i,"fake_datanode_id_" + i,100,101,102,103);
peers[i]=new FakePeer(dnIds[i],false);
}
for (int i=0; i < CAPACITY; ++i) {
cache.put(dnIds[i],peers[i]);
}
assertEquals(CAPACITY,cache.size());
// One insert past capacity must evict the eldest entry (index 0).
cache.put(dnIds[CAPACITY],peers[CAPACITY]);
assertEquals(CAPACITY,cache.size());
assertSame(null,cache.get(dnIds[0],false));
// All surviving peers are still cached and still open.
for (int i=1; i < CAPACITY; ++i) {
Peer peer=cache.get(dnIds[i],false);
assertSame(peers[i],peer);
assertFalse(peer.isClosed());
peer.close();
}
assertEquals(1,cache.size());
cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that get(dnId, true) returns only peers backed by a domain
 * socket: exactly one such peer was cached, so a second lookup yields null,
 * while plain get(dnId, false) drains the remaining TCP peers.
 */
@Test public void testDomainSocketPeers() throws Exception {
final int CAPACITY=3;
PeerCache cache=new PeerCache(CAPACITY,100000);
DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
HashMultiset peers=HashMultiset.create(CAPACITY);
// Only the last peer is created with a domain socket.
for (int i=0; i < CAPACITY; ++i) {
FakePeer peer=new FakePeer(dnId,i == CAPACITY - 1);
peers.add(peer);
cache.put(dnId,peer);
}
assertEquals(CAPACITY,cache.size());
// Requesting a domain-socket peer must return the one that has one.
Peer peer=cache.get(dnId,true);
assertTrue(peer.getDomainSocket() != null);
peers.remove(peer);
// No domain-socket peers remain for this datanode.
peer=cache.get(dnId,true);
assertTrue(peer == null);
// The remaining (TCP) peers are all retrievable and still open.
while (!peers.isEmpty()) {
peer=cache.get(dnId,false);
assertTrue(peer != null);
assertFalse(peer.isClosed());
peers.remove(peer);
}
assertEquals(0,cache.size());
cache.close();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Basic PeerCache round-trip: a cached peer stays open, get() returns it
 * and removes it from the cache.
 */
@Test public void testAddAndRetrieve() throws Exception {
PeerCache cache=new PeerCache(3,100000);
DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
FakePeer peer=new FakePeer(dnId,false);
cache.put(dnId,peer);
// Caching must not close the peer.
assertFalse(peer.isClosed());
assertEquals(1,cache.size());
// get() hands the peer back and removes it from the cache.
assertEquals(peer,cache.get(dnId,false));
assertEquals(0,cache.size());
cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies time-based expiry in PeerCache: after well past the expiry
 * period, all cached peers are evicted and closed.
 */
@Test public void testExpiry() throws Exception {
final int CAPACITY=3;
final int EXPIRY_PERIOD=10;
PeerCache cache=new PeerCache(CAPACITY,EXPIRY_PERIOD);
DatanodeID dnIds[]=new DatanodeID[CAPACITY];
FakePeer peers[]=new FakePeer[CAPACITY];
for (int i=0; i < CAPACITY; ++i) {
dnIds[i]=new DatanodeID("192.168.0.1","fakehostname_" + i,"fake_datanode_id",100,101,102,103);
peers[i]=new FakePeer(dnIds[i],false);
}
for (int i=0; i < CAPACITY; ++i) {
cache.put(dnIds[i],peers[i]);
}
// Sleep 50x the expiry period so the background expiry thread has ample
// time to run; NOTE(review): a fixed sleep can still be flaky on very
// loaded machines.
Thread.sleep(EXPIRY_PERIOD * 50);
assertEquals(0,cache.size());
// Expired peers must also have been closed, not just dropped.
for (int i=0; i < CAPACITY; ++i) {
assertTrue(peers[i].isClosed());
}
// Extra sleep exercises the expiry thread on an already-empty cache.
Thread.sleep(EXPIRY_PERIOD * 50);
cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that PeerCache can hold several peers under the same DatanodeID
 * key and hand each one back exactly once.
 */
@Test public void testMultiplePeersWithSameKey() throws Exception {
final int CAPACITY=3;
PeerCache cache=new PeerCache(CAPACITY,100000);
DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
// Multiset: several distinct peers share one key.
HashMultiset peers=HashMultiset.create(CAPACITY);
for (int i=0; i < CAPACITY; ++i) {
FakePeer peer=new FakePeer(dnId,false);
peers.add(peer);
cache.put(dnId,peer);
}
assertEquals(CAPACITY,cache.size());
// Drain the cache; every stored peer must come back open, once.
while (!peers.isEmpty()) {
Peer peer=cache.get(dnId,false);
assertTrue(peer != null);
assertFalse(peer.isClosed());
peers.remove(peer);
}
assertEquals(0,cache.size());
cache.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Writes a multi-block file, abandons the last (in-progress) block via the
 * NN RPC, restarts the NameNode, and verifies the file length shrank by one
 * block and the remaining data is intact.
 */
@Test public void testRestartDfsWithAbandonedBlock() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Force clients to reconnect after the NN restart.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
long len=0;
// Initialize to null so the finally block can close it safely even if
// cluster startup or create() fails (the original leaked this stream).
FSDataOutputStream stream=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
stream.hflush();
// Wait until all but the last block are visible at the NN.
while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
FileStatus status=fs.getFileStatus(FILE_PATH);
len=status.getLen();
Thread.sleep(100);
}
DFSClient dfsclient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
HdfsFileStatus fileStatus=dfsclient.getNamenode().getFileInfo(FILE_NAME);
LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(FILE_NAME,0,BLOCK_SIZE * NUM_BLOCKS);
assertEquals(NUM_BLOCKS,blocks.getLocatedBlocks().size());
// Abandon the last, still-under-construction block.
LocatedBlock b=blocks.getLastLocatedBlock();
dfsclient.getNamenode().abandonBlock(b.getBlock(),fileStatus.getFileId(),FILE_NAME,dfsclient.clientName);
cluster.restartNameNode();
// After replaying the edit log the file should be one block shorter.
FileStatus status=fs.getFileStatus(FILE_PATH);
assertTrue("Length incorrect: " + status.getLen(),status.getLen() == len - BLOCK_SIZE);
// The data before the abandoned block must still read back intact.
FSDataInputStream readStream=fs.open(FILE_PATH);
try {
byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,expectedBuf.length);
assertArrayEquals(expectedBuf,verifyBuf);
}
finally {
IOUtils.closeStream(readStream);
}
}
finally {
// Best-effort close of the abandoned writer (null-safe, swallows
// the expected failure since its last block was abandoned).
IOUtils.closeStream(stream);
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates and closes a file of certain length.
 * Calls append to allow next write() operation to add to the end of it
 * After write() invocation, calls hflush() to make sure that data sunk through
 * the pipeline and check the state of the last block's replica.
 * It supposes to be in RBW state
 * @throws IOException in case of an error
 */
@Test public void pipeline_01() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
if (LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
Path filePath=new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
if (LOG.isDebugEnabled()) {
LOG.debug("Invoking append but doing nothing otherwise...");
}
FSDataOutputStream ofs=fs.append(filePath);
// Close in finally so a failing assertion below cannot leak the stream
// (the original only closed on the success path).
try {
ofs.writeBytes("Some more stuff to write");
((DFSOutputStream)ofs.getWrappedStream()).hflush();
List<LocatedBlock> lb=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_SIZE - 1,FILE_SIZE).getLocatedBlocks();
String bpid=cluster.getNamesystem().getBlockPoolId();
// After append()+write()+hflush(), every DN's replica of the last
// block must be in RBW (replica being written) state.
for ( DataNode dn : cluster.getDataNodes()) {
Replica r=DataNodeTestUtils.fetchReplicaInfo(dn,bpid,lb.get(0).getBlock().getBlockId());
assertTrue("Replica on DN " + dn + " shouldn't be null",r != null);
assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",HdfsServerConstants.ReplicaState.RBW,r.getState());
}
}
finally {
ofs.close();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises the hedged-read thread pool: with a huge timeout no hedged
 * reads fire; with a tiny timeout they do; and when many concurrent reads
 * max out the pool, hedged reads fall back to the calling thread
 * (hedgedReadOpsInCurThread > 0).
 */
@Test public void testMaxOutHedgedReadPool() throws IOException, InterruptedException, ExecutionException {
isHedgedRead=true;
Configuration conf=new Configuration();
int numHedgedReadPoolThreads=5;
final int initialHedgedReadTimeoutMillis=50000;
final int fixedSleepIntervalMillis=50;
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,numHedgedReadPoolThreads);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,initialHedgedReadTimeoutMillis);
// Inject a fixed delay into every datanode fetch so reads reliably
// exceed a small hedged-read timeout.
DFSClientFaultInjector.instance=Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector=DFSClientFaultInjector.instance;
// NOTE(review): raw Answer type here (and the raw ArrayList below) look
// like generics stripped by extraction — confirm against the original.
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(fixedSleepIntervalMillis);
return null;
}
}
).when(injector).startFetchFromDatanode();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
DistributedFileSystem fileSys=cluster.getFileSystem();
DFSClient dfsClient=fileSys.getClient();
DFSHedgedReadMetrics metrics=dfsClient.getHedgedReadMetrics();
// Reset metrics so earlier tests cannot skew the counts below.
metrics.hedgedReadOps.set(0);
metrics.hedgedReadOpsWin.set(0);
metrics.hedgedReadOpsInCurThread.set(0);
try {
Path file1=new Path("hedgedReadMaxOut.dat");
writeFile(fileSys,file1);
// With a 50s timeout, the 50ms injected delay never triggers hedging.
pReadFile(fileSys,file1);
assertTrue(metrics.getHedgedReadOps() == 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
// Drop the timeout below the injected delay: hedging must kick in,
// but a single reader never exhausts the pool.
dfsClient.setHedgedReadTimeout(50);
pReadFile(fileSys,file1);
assertTrue(metrics.getHedgedReadOps() > 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
// Run 10x more concurrent reads than pool threads to saturate it.
int factor=10;
int numHedgedReads=numHedgedReadPoolThreads * factor;
long initialReadOpsValue=metrics.getHedgedReadOps();
ExecutorService executor=Executors.newFixedThreadPool(numHedgedReads);
ArrayList> futures=new ArrayList>();
for (int i=0; i < numHedgedReads; i++) {
futures.add(executor.submit(getPReadFileCallable(fileSys,file1)));
}
for (int i=0; i < numHedgedReads; i++) {
futures.get(i).get();
}
assertTrue(metrics.getHedgedReadOps() > initialReadOpsValue);
// Pool exhaustion forces some hedged reads onto the calling thread.
assertTrue(metrics.getHedgedReadOpsInCurThread() > 0);
cleanupFile(fileSys,file1);
executor.shutdown();
}
finally {
fileSys.close();
cluster.shutdown();
Mockito.reset(injector);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Injects one ChecksumException plus read delays into hedged reads and
 * verifies the client retries the hedged-read loop the expected number of
 * times (3) instead of failing with BlockMissingException.
 */
@Test public void testHedgedReadLoopTooManyTimes() throws IOException {
Configuration conf=new Configuration();
int numHedgedReadPoolThreads=5;
final int hedgedReadTimeoutMillis=50;
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,numHedgedReadPoolThreads);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,hedgedReadTimeoutMillis);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,0);
DFSClientFaultInjector.instance=Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector=DFSClientFaultInjector.instance;
final int sleepMs=100;
// First fetch: sleep past the hedged-read timeout, then throw exactly
// one ChecksumException (compareAndSet guards the one-shot).
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
if (DFSClientFaultInjector.exceptionNum.compareAndSet(0,1)) {
System.out.println("-------------- throw Checksum Exception");
throw new ChecksumException("ChecksumException test",100);
}
return null;
}
}
).when(injector).fetchFromDatanodeException();
// Every datanode read is additionally delayed so hedging keeps looping.
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(sleepMs * 2);
return null;
}
}
).when(injector).readFromDatanodeDelay();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
DistributedFileSystem fileSys=cluster.getFileSystem();
DFSClient dfsClient=fileSys.getClient();
FSDataOutputStream output=null;
DFSInputStream input=null;
String filename="/hedgedReadMaxOut.dat";
try {
Path file=new Path(filename);
output=fileSys.create(file,(short)2);
byte[] data=new byte[64 * 1024];
output.write(data);
output.flush();
output.write(data);
output.flush();
output.write(data);
output.flush();
output.close();
byte[] buffer=new byte[64 * 1024];
input=dfsClient.open(filename);
input.read(0,buffer,0,1024);
input.close();
assertEquals(3,input.getHedgedReadOpsLoopNumForTesting());
}
catch ( BlockMissingException e) {
// fail() with a message instead of assertTrue(false): same effect,
// but the failure is self-describing.
fail("Hedged read should have succeeded despite the injected faults");
}
finally {
Mockito.reset(injector);
// cleanup() is null-safe and tolerates the already-closed streams.
IOUtils.cleanup(null,input);
IOUtils.cleanup(null,output);
fileSys.close();
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test public void testBlockAllocationAdjustsUsageConservatively() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
// Also query content summaries over WebHDFS to check both code paths
// agree.
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
Path dir=new Path("/test");
Path file1=new Path("/test/test1");
Path file2=new Path("/test/test2");
boolean exceededQuota=false;
// Quota = 3 blocks; each file is half a block but replicated x3, so
// one file consumes exactly half the quota (3 * BLOCK_SIZE/2).
final int QUOTA_SIZE=3 * BLOCK_SIZE;
final int FILE_SIZE=BLOCK_SIZE / 2;
ContentSummary c;
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
DFSTestUtil.createFile(fs,file1,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file1,(short)3);
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
assertEquals("Quota is half consumed",QUOTA_SIZE / 2,c.getSpaceConsumed());
// The second file would need a full block per replica at allocation
// time, which exceeds the remaining quota even though the actual
// data would fit — hence the conservative rejection.
try {
DFSTestUtil.createFile(fs,file2,FILE_SIZE,(short)3,1L);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test HDFS operations that change disk space consumed by a directory tree.
 * namely create, rename, delete, append, and setReplication.
 * This is based on testNamespaceCommands() above.
 * All assertEquals calls use JUnit's (expected, actual) order so failure
 * messages read correctly (the original had the arguments reversed).
 */
@Test public void testSpaceCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,"512");
// Small content-summary limit forces getContentSummary to yield, which
// is counted and asserted at the end of the test.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
try {
int fileLen=1024;
short replication=3;
// Disk space consumed by one file = length * replication.
int fileSpace=fileLen * replication;
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
// Space quotas: qdir1 = 4 files, qdir20 = 6 files, qdir21 = 2 files.
final Path quotaDir1=new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,4 * fileSpace);
ContentSummary c=dfs.getContentSummary(quotaDir1);
assertEquals(4 * fileSpace,c.getSpaceQuota());
final Path quotaDir20=new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,6 * fileSpace);
c=dfs.getContentSummary(quotaDir20);
assertEquals(6 * fileSpace,c.getSpaceQuota());
final Path quotaDir21=new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir21));
dfs.setQuota(quotaDir21,HdfsConstants.QUOTA_DONT_SET,2 * fileSpace);
c=dfs.getContentSummary(quotaDir21);
assertEquals(2 * fileSpace,c.getSpaceQuota());
Path tempPath=new Path(quotaDir21,"nqdir32");
assertTrue(dfs.mkdirs(tempPath));
// One file fits inside qdir21's 2-file quota.
DFSTestUtil.createFile(dfs,new Path(tempPath,"fileDir/file1"),fileLen,replication,0);
c=dfs.getContentSummary(quotaDir21);
assertEquals(fileSpace,c.getSpaceConsumed());
// A 2-file-sized create must blow qdir21's quota.
boolean hasException=false;
try {
DFSTestUtil.createFile(dfs,new Path(quotaDir21,"nqdir33/file2"),2 * fileLen,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// Deleting the partial dir restores the pre-failure accounting.
assertTrue(dfs.delete(new Path(quotaDir21,"nqdir33"),true));
c=dfs.getContentSummary(quotaDir21);
assertEquals(fileSpace,c.getSpaceConsumed());
assertEquals(2 * fileSpace,c.getSpaceQuota());
c=dfs.getContentSummary(quotaDir20);
assertEquals(0,c.getSpaceConsumed());
// Rename moves the consumed space from qdir21 to qdir20.
Path dstPath=new Path(quotaDir20,"nqdir30");
Path srcPath=new Path(quotaDir21,"nqdir32");
assertTrue(dfs.rename(srcPath,dstPath));
c=dfs.getContentSummary(quotaDir20);
assertEquals(fileSpace,c.getSpaceConsumed());
c=dfs.getContentSummary(quotaDir1);
assertEquals(fileSpace,c.getSpaceConsumed());
c=dfs.getContentSummary(quotaDir21);
assertEquals(0,c.getSpaceConsumed());
final Path file2=new Path(dstPath,"fileDir/file2");
int file2Len=2 * fileLen;
DFSTestUtil.createFile(dfs,file2,file2Len,replication,0);
c=dfs.getContentSummary(quotaDir20);
assertEquals(3 * fileSpace,c.getSpaceConsumed());
c=dfs.getContentSummary(quotaDir21);
assertEquals(0,c.getSpaceConsumed());
// Renaming back would exceed qdir21's quota; the tree must be left
// unchanged after the failed rename.
hasException=false;
try {
assertFalse(dfs.rename(dstPath,srcPath));
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertFalse(dfs.exists(srcPath));
assertTrue(dfs.exists(dstPath));
c=dfs.getContentSummary(quotaDir20);
assertEquals(3 * fileSpace,c.getSpaceConsumed());
c=dfs.getContentSummary(quotaDir21);
assertEquals(0,c.getSpaceConsumed());
c=dfs.getContentSummary(quotaDir1);
assertEquals(4 * fileSpace,c.getSpaceQuota());
c=dfs.getContentSummary(dstPath);
assertEquals(3 * fileSpace,c.getSpaceConsumed());
// Append within quota adds one more file's worth of space.
OutputStream out=dfs.append(file2);
out.write(new byte[fileLen]);
out.close();
file2Len+=fileLen;
c=dfs.getContentSummary(dstPath);
assertEquals(4 * fileSpace,c.getSpaceConsumed());
// Raise qdir1's quota to 5 files, then append past it.
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,5 * fileSpace);
out=dfs.append(file2);
hasException=false;
try {
out.write(new byte[fileLen + 1024]);
out.flush();
out.close();
}
catch ( DSQuotaExceededException e) {
hasException=true;
IOUtils.closeStream(out);
}
assertTrue(hasException);
// The append still committed one full block before failing.
file2Len+=fileLen;
c=dfs.getContentSummary(dstPath);
assertEquals(5 * fileSpace,c.getSpaceConsumed());
// Lowering replication frees file2Len bytes of accounted space.
dfs.setReplication(file2,(short)(replication - 1));
c=dfs.getContentSummary(dstPath);
assertEquals(5 * fileSpace - file2Len,c.getSpaceConsumed());
// Raising replication back would exceed the quota again.
hasException=false;
try {
dfs.setReplication(file2,(short)(replication + 1));
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
c=dfs.getContentSummary(dstPath);
assertEquals(5 * fileSpace - file2Len,c.getSpaceConsumed());
// With generous quotas the replication bump succeeds.
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace);
dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace);
dfs.setReplication(file2,(short)(replication + 1));
c=dfs.getContentSummary(dstPath);
assertEquals(5 * fileSpace + file2Len,c.getSpaceConsumed());
// HDFS-2053 regression: sibling dirs where only one has a quota.
final Path quotaDir2053=new Path("/hdfs-2053");
assertTrue(dfs.mkdirs(quotaDir2053));
final Path quotaDir2053_A=new Path(quotaDir2053,"A");
assertTrue(dfs.mkdirs(quotaDir2053_A));
final Path quotaDir2053_B=new Path(quotaDir2053,"B");
assertTrue(dfs.mkdirs(quotaDir2053_B));
final Path quotaDir2053_C=new Path(quotaDir2053,"C");
assertTrue(dfs.mkdirs(quotaDir2053_C));
int sizeFactorA=1;
int sizeFactorB=2;
int sizeFactorC=4;
dfs.setQuota(quotaDir2053_C,HdfsConstants.QUOTA_DONT_SET,(sizeFactorC + 1) * fileSpace);
c=dfs.getContentSummary(quotaDir2053_C);
assertEquals((sizeFactorC + 1) * fileSpace,c.getSpaceQuota());
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_A,"fileA"),sizeFactorA * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_A);
assertEquals(sizeFactorA * fileSpace,c.getSpaceConsumed());
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_B,"fileB"),sizeFactorB * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_B);
assertEquals(sizeFactorB * fileSpace,c.getSpaceConsumed());
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_C,"fileC"),sizeFactorC * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_C);
assertEquals(sizeFactorC * fileSpace,c.getSpaceConsumed());
// Parent sees the sum of all three children.
c=dfs.getContentSummary(quotaDir2053);
assertEquals((sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace,c.getSpaceConsumed());
assertEquals(20,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Like the previous test but create many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue
 * a big enough accounting error to violate the quota.
 */
@Test public void testMultipleFilesSmallerThanOneBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
// Small limit so getContentSummary yields; counted at the end.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
// Check summaries over WebHDFS as well, so both code paths agree.
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
// Without an explicit quota the namespace quota defaults to "unset".
long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem());
assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE);
Path dir=new Path("/test");
boolean exceededQuota=false;
ContentSummary c;
// Quota = 32 blocks; 59 files of 1KB x3 replicas fit, a 60th cannot.
final int FILE_SIZE=1024;
final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024,fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024,QUOTA_SIZE);
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
for (int i=0; i < 59; i++) {
Path file=new Path("/test/test" + i);
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed());
// Remaining quota equals three replicas' worth of block slack —
// message corrected: this checks remaining quota, not consumption.
assertEquals("Invalid remaining quota",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// The 60th file needs a full block per replica at allocation time,
// which conservatively exceeds the remaining quota.
try {
Path file=new Path("/test/test59");
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test limit cases for setting space quotas.
 * Long.MAX_VALUE - 1 is the largest settable quota; Long.MAX_VALUE means
 * "don't change", and anything above it is rejected. assertEquals is used
 * instead of assertTrue(a == b) so failures report both values.
 */
@Test public void testMaxSpaceQuotas() throws Exception {
final Configuration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
final Path testFolder=new Path("/testFolder");
assertTrue(dfs.mkdirs(testFolder));
// Largest settable values are accepted for both quota kinds.
dfs.setQuota(testFolder,Long.MAX_VALUE - 1,10);
ContentSummary c=dfs.getContentSummary(testFolder);
assertEquals("Quota not set properly",Long.MAX_VALUE - 1,c.getQuota());
dfs.setQuota(testFolder,10,Long.MAX_VALUE - 1);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota not set properly",Long.MAX_VALUE - 1,c.getSpaceQuota());
// Long.MAX_VALUE means "leave this quota unchanged".
dfs.setQuota(testFolder,Long.MAX_VALUE,10);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota should not have changed",10,c.getQuota());
dfs.setQuota(testFolder,10,Long.MAX_VALUE);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota should not have changed",10,c.getSpaceQuota());
// Values above Long.MAX_VALUE (overflowing to negative) are rejected.
try {
dfs.setQuota(testFolder,Long.MAX_VALUE + 1,10);
fail("Exception not thrown");
}
catch ( IllegalArgumentException e) {
// Expected.
}
try {
dfs.setQuota(testFolder,10,Long.MAX_VALUE + 1);
fail("Exception not thrown");
}
catch ( IllegalArgumentException e) {
// Expected.
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test commands that change the size of the name space:
 * mkdirs, rename, and delete
 */
@Test public void testNamespaceCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Make getContentSummary yield the namesystem lock every 2 items so the
// final getYieldCount() assertion below is deterministic.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final DistributedFileSystem dfs=cluster.getFileSystem();
try {
// 1: create directory tree /nqdir0/qdir1/qdir20/nqdir30
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
// 2: set the namespace quota of /nqdir0/qdir1 to 6 (directories count too)
final Path quotaDir1=new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1,6,HdfsConstants.QUOTA_DONT_SET);
ContentSummary c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),6);
// 3: set the namespace quota of /nqdir0/qdir1/qdir20 to 7
final Path quotaDir2=new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir2,7,HdfsConstants.QUOTA_DONT_SET);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
// 4: create /nqdir0/qdir1/qdir21 and give it a namespace quota of 2
final Path quotaDir3=new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir3));
dfs.setQuota(quotaDir3,2,HdfsConstants.QUOTA_DONT_SET);
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),1);
assertEquals(c.getQuota(),2);
// 5: mkdir qdir21/nqdir32 exactly fills qdir21's quota of 2
Path tempPath=new Path(quotaDir3,"nqdir32");
assertTrue(dfs.mkdirs(tempPath));
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),2);
// 6: mkdir qdir21/nqdir33 exceeds qdir21's quota and must fail
tempPath=new Path(quotaDir3,"nqdir33");
boolean hasException=false;
try {
assertFalse(dfs.mkdirs(tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// the failed mkdir must not change the counts under qdir21
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),2);
// 7: mkdir qdir20/nqdir31 is allowed by both enclosing quotas
tempPath=new Path(quotaDir2,"nqdir31");
assertTrue(dfs.mkdirs(tempPath));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),7);
// 8: /nqdir0/qdir1 is now full: 6 directories for a quota of 6
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// 9: another mkdir under qdir20 would push qdir1 past its quota
tempPath=new Path(quotaDir2,"nqdir33");
hasException=false;
try {
assertFalse(dfs.mkdirs(tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 10: rename within the same quota root keeps the totals unchanged
tempPath=new Path(quotaDir2,"nqdir30");
dfs.rename(new Path(quotaDir3,"nqdir32"),tempPath);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// 11: renaming nqdir30 into qdir21 must fail against qdir21's quota
// and leave the source untouched
hasException=false;
try {
assertFalse(dfs.rename(tempPath,quotaDir3));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3,"nqdir30")));
// 12: the same rename with an explicit destination name also fails
hasException=false;
try {
assertFalse(dfs.rename(tempPath,new Path(quotaDir3,"nqdir32")));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3,"nqdir32")));
// 13: moving nqdir30 out to unrestricted /nqdir0 frees space under qdir1
assertTrue(dfs.rename(tempPath,new Path("/nqdir0")));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),6);
// 14: moving the (now 2-directory) tree back would exceed qdir1's quota
assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
hasException=false;
try {
assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"),tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 15: moving qdir21 (itself a quota root) under qdir20 is allowed
assertTrue(dfs.rename(quotaDir3,quotaDir2));
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),6);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),7);
// the moved directory keeps its own quota of 2
tempPath=new Path(quotaDir2,"qdir21");
c=dfs.getContentSummary(tempPath);
assertEquals(c.getDirectoryCount(),1);
assertEquals(c.getQuota(),2);
// 16: deleting the moved quota root updates the ancestors' counts
dfs.delete(tempPath,true);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),6);
// 17: the freed space allows moving the 3-directory tree back in
assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"),quotaDir2));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),5);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// with CONTENT_SUMMARY_LIMIT=2 the summaries above yield exactly 14 times
assertEquals(14,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test quota related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
 */
@Test public void testQuotaCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
final int DEFAULT_BLOCK_SIZE=512;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
// Yield the namesystem lock every 2 content-summary items so the final
// getYieldCount() assertion is deterministic.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
DFSAdmin admin=new DFSAdmin(conf);
try {
final int fileLen=1024;
final short replication=5;
// large enough for one replicated file, too small for two
final long spaceQuota=fileLen * replication * 15 / 8;
// 1: set a namespace quota of 3 and a space quota on /test
final Path parent=new Path("/test");
assertTrue(dfs.mkdirs(parent));
String[] args=new String[]{"-setQuota","3",parent.toString()};
runCommand(admin,args,false);
// "2t" is parsed as 2 TB, i.e. 2 * 2^40 bytes
runCommand(admin,false,"-setSpaceQuota","2t",parent.toString());
assertEquals(2L << 40,dfs.getContentSummary(parent).getSpaceQuota());
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota),parent.toString());
// 2: create a directory and a file; /test now holds 3 items (at quota)
final Path childDir0=new Path(parent,"data0");
assertTrue(dfs.mkdirs(childDir0));
final Path childFile0=new Path(parent,"datafile0");
DFSTestUtil.createFile(fs,childFile0,fileLen,replication,0);
ContentSummary c=dfs.getContentSummary(parent);
assertEquals(c.getFileCount() + c.getDirectoryCount(),3);
assertEquals(c.getQuota(),3);
assertEquals(c.getSpaceConsumed(),fileLen * replication);
assertEquals(c.getSpaceQuota(),spaceQuota);
// a child directory without its own quota reports -1
c=dfs.getContentSummary(childDir0);
assertEquals(c.getFileCount() + c.getDirectoryCount(),1);
assertEquals(c.getQuota(),-1);
c=dfs.getContentSummary(parent);
assertEquals(c.getSpaceConsumed(),fileLen * replication);
// 3: any further mkdir under /test exceeds the namespace quota
final Path childDir1=new Path(parent,"data1");
boolean hasException=false;
try {
assertFalse(dfs.mkdirs(childDir1));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 4: creating another file exceeds the namespace quota as well
OutputStream fout;
final Path childFile1=new Path(parent,"datafile1");
hasException=false;
try {
fout=dfs.create(childFile1);
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 5: clrQuota removes only the namespace quota; the space quota remains
runCommand(admin,new String[]{"-clrQuota",parent.toString()},false);
c=dfs.getContentSummary(parent);
assertEquals(c.getQuota(),-1);
assertEquals(c.getSpaceQuota(),spaceQuota);
// 6: clrQuota on a directory without a quota succeeds (no-op)
runCommand(admin,new String[]{"-clrQuota",childDir0.toString()},false);
c=dfs.getContentSummary(childDir0);
assertEquals(c.getQuota(),-1);
// 7: the second file now fits the namespace but violates the space
// quota; the violation surfaces while writing/closing the stream
fout=dfs.create(childFile1,replication);
try {
fout.write(new byte[fileLen]);
fout.close();
Assert.fail();
}
catch ( QuotaExceededException e) {
IOUtils.closeStream(fout);
}
dfs.delete(childFile1,false);
// 8: after clrSpaceQuota the same file can be created
runCommand(admin,false,"-clrSpaceQuota",parent.toString());
c=dfs.getContentSummary(parent);
assertEquals(c.getQuota(),-1);
assertEquals(c.getSpaceQuota(),-1);
DFSTestUtil.createFile(dfs,childFile1,fileLen,replication,0);
// 9: setting quotas below the current usage is allowed ...
args=new String[]{"-setQuota","1",parent.toString()};
runCommand(admin,args,false);
runCommand(admin,false,"-setSpaceQuota",Integer.toString(fileLen),args[2]);
args=new String[]{"-setQuota","1",childDir0.toString()};
runCommand(admin,args,false);
// ... but any further addition fails
hasException=false;
try {
assertFalse(dfs.mkdirs(new Path(childDir0,"in")));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
c=dfs.getContentSummary(childDir0);
assertEquals(c.getDirectoryCount() + c.getFileCount(),1);
assertEquals(c.getQuota(),1);
// 10: quota commands on a non-existent directory must fail
Path nonExistentPath=new Path("/test1");
assertFalse(dfs.exists(nonExistentPath));
args=new String[]{"-setQuota","1",nonExistentPath.toString()};
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","1g",nonExistentPath.toString());
// 11: quota commands targeting a file (not a directory) must fail;
// note args[1] becomes the quota value, args[2] stays nonExistentPath
assertTrue(dfs.isFile(childFile0));
args[1]=childFile0.toString();
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","1t",args[1]);
args[0]="-clrQuota";
runCommand(admin,args,true);
runCommand(admin,true,"-clrSpaceQuota",args[1]);
// 12: clrQuota/clrSpaceQuota on a non-existent directory must fail
args[1]=nonExistentPath.toString();
runCommand(admin,args,true);
runCommand(admin,true,"-clrSpaceQuota",args[1]);
// 13: invalid quota values are rejected: 0, -1, Long.MAX_VALUE+1
// (overflows to negative) and non-numeric garbage
args=new String[]{"-setQuota","0",parent.toString()};
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","0",args[2]);
args[1]="-1";
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
args[1]=String.valueOf(Long.MAX_VALUE + 1L);
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
args[1]="33aa1.5";
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
// a size suffix that multiplies past Long.MAX_VALUE must also fail
runCommand(admin,true,"-setSpaceQuota",(Long.MAX_VALUE / 1024 / 1024 + 1024) + "m",args[2]);
// 14: a non-super user may neither set nor clear quotas
final String username="userxx";
UserGroupInformation ugi=UserGroupInformation.createUserForTesting(username,new String[]{"groupyy"});
final String[] args2=args.clone();
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
assertEquals("Not running as new user",username,UserGroupInformation.getCurrentUser().getShortUserName());
DFSAdmin userAdmin=new DFSAdmin(conf);
// restore a valid quota value; failure must come from permissions
args2[1]="100";
runCommand(userAdmin,args2,true);
runCommand(userAdmin,true,"-setSpaceQuota","1g",args2[2]);
String[] args3=new String[]{"-clrQuota",parent.toString()};
runCommand(userAdmin,args3,true);
runCommand(userAdmin,true,"-clrSpaceQuota",args3[1]);
return null;
}
}
);
// 15: quotas on the root directory; NOTE(review): clrQuota on "/" is
// expected to fail both before and after setQuota succeeds -- verify
// the intended semantics of clearing the root namespace quota
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-setQuota","1000000","/");
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-clrSpaceQuota","/");
runCommand(admin,new String[]{"-clrQuota",parent.toString()},false);
runCommand(admin,false,"-clrSpaceQuota",parent.toString());
// 16: a space quota of exactly one block x replication admits one
// one-block file and rejects a second
final Path childDir2=new Path(parent,"data2");
assertTrue(dfs.mkdirs(childDir2));
final Path childFile2=new Path(childDir2,"datafile2");
final Path childFile3=new Path(childDir2,"datafile3");
final long spaceQuota2=DEFAULT_BLOCK_SIZE * replication;
final long fileLen2=DEFAULT_BLOCK_SIZE;
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString());
runCommand(admin,false,"-clrSpaceQuota",childDir2.toString());
DFSTestUtil.createFile(fs,childFile2,fileLen2,replication,0);
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString());
hasException=false;
try {
DFSTestUtil.createFile(fs,childFile3,fileLen2,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 17: the same space-quota behaviour applies to the root directory
final Path childFile4=new Path("/","datafile2");
final Path childFile5=new Path("/","datafile3");
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-clrSpaceQuota","/");
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/");
runCommand(admin,false,"-clrSpaceQuota","/");
DFSTestUtil.createFile(fs,childFile4,fileLen2,replication,0);
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/");
hasException=false;
try {
DFSTestUtil.createFile(fs,childFile5,fileLen2,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// with CONTENT_SUMMARY_LIMIT=2 the summaries above yield exactly 4 times
assertEquals(4,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1
 * mkdir /user/dir2
 * move /user/dir1/file1 /user/dir2/
 *
 * Verifies that renaming a file that is still open for write survives two
 * namenode restarts: afterwards the file exists only at its new location
 * and contains all data flushed before the rename.
 */
@Test public void testWhileOpenRenameToExistentDirectory() throws IOException {
  Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1);
  System.out.println("Test 3************************************");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    final int nnport=cluster.getNameNodePort();
    // Create /user/dir1/file1, write and hflush, keeping the stream open.
    Path dir1=new Path("/user/dir1");
    Path file1=new Path(dir1,"file1");
    FSDataOutputStream stm1=TestFileCreation.createFile(fs,file1,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    // Rename the still-open file into an existing directory.
    Path dir2=new Path("/user/dir2");
    fs.mkdirs(dir2);
    fs.rename(file1,dir2);
    // Restart the cluster twice (sleeping long enough for client
    // connections to idle out) so the rename is replayed from the logs.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    }
    catch ( InterruptedException e) {
      // preserve the interrupt status instead of silently swallowing it
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    }
    catch ( InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // The file must only be visible at its post-rename location and be
    // fully readable.
    Path newfile=new Path("/user/dir2","file1");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs,newfile);
  }
  finally {
    // fs is null if startup failed before getFileSystem(); guard so the
    // cleanup does not throw an NPE that masks the original failure.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1 /user/dir2/file2
 * mkdir /user/dir3
 * move /user/dir1 /user/dir3
 *
 * Renames the parent directory of a file that is still open for write and
 * verifies, across two namenode restarts, that the open files appear at
 * their new locations with all flushed data intact.
 */
@Test public void testWhileOpenRenameParent() throws IOException {
  Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,TestFileCreation.blockSize);
  System.out.println("Test 1*****************************");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // Suppress endCurrentLogSegment on the edit log (NOTE(review):
    // presumably so the restart has to recover from an in-progress edit
    // log segment -- confirm intent).
    FSEditLog spyLog=spy(cluster.getNameNode().getFSImage().getEditLog());
    doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
    cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
    final int nnport=cluster.getNameNodePort();
    // Open two files in different directories, write and hflush both.
    Path dir1=new Path("/user/a+b/dir1");
    Path file1=new Path(dir1,"file1");
    FSDataOutputStream stm1=TestFileCreation.createFile(fs,file1,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    Path dir2=new Path("/user/dir2");
    Path file2=new Path(dir2,"file2");
    FSDataOutputStream stm2=TestFileCreation.createFile(fs,file2,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2);
    stm2.hflush();
    // Move dir1 (containing the still-open file1) under dir3.
    Path dir3=new Path("/user/dir3");
    fs.mkdirs(dir3);
    fs.rename(dir1,dir3);
    // Also rename a third open file and keep writing to it afterwards.
    Path file3=new Path(dir3,"file3");
    FSDataOutputStream stm3=fs.create(file3);
    fs.rename(file3,new Path(dir3,"bozo"));
    TestFileCreation.writeFile(stm3,TestFileCreation.blockSize + 1);
    stm3.hflush();
    // Restart the cluster twice, letting client connections idle out.
    cluster.getNameNode().stop();
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    }
    catch ( InterruptedException e) {
      // preserve the interrupt status instead of swallowing it
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    }
    catch ( InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // file1 must only exist at its post-rename location; file2 untouched.
    Path newfile=new Path("/user/dir3/dir1","file1");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs,newfile);
  }
  finally {
    // fs is null if startup failed before getFileSystem(); guard so the
    // cleanup does not throw an NPE that masks the original failure.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1 /user/dir2/file2
 * move /user/dir1 /user/dir3
 *
 * Renames the parent directory of an open file to a path that does not
 * exist yet, then verifies across two namenode restarts that the open file
 * reappears under the renamed directory with its flushed data intact.
 */
@Test public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
  Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1);
  System.out.println("Test 2************************************");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    final int nnport=cluster.getNameNodePort();
    // Open two files in different directories, write and hflush both.
    Path dir1=new Path("/user/dir1");
    Path file1=new Path(dir1,"file1");
    FSDataOutputStream stm1=TestFileCreation.createFile(fs,file1,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    Path dir2=new Path("/user/dir2");
    Path file2=new Path(dir2,"file2");
    FSDataOutputStream stm2=TestFileCreation.createFile(fs,file2,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2);
    stm2.hflush();
    // dir3 is NOT created first: dir1 itself becomes /user/dir3.
    Path dir3=new Path("/user/dir3");
    fs.rename(dir1,dir3);
    // Restart the cluster twice, letting client connections idle out.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    }
    catch ( InterruptedException e) {
      // preserve the interrupt status instead of swallowing it
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    }
    catch ( InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // file1 must now live directly under /user/dir3; file2 is untouched.
    Path newfile=new Path("/user/dir3","file1");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs,newfile);
  }
  finally {
    // fs is null if startup failed before getFileSystem(); guard so the
    // cleanup does not throw an NPE that masks the original failure.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1
 * move /user/dir1/file1 /user/dir2/
 *
 * Renames a still-open file to a destination that does not exist (so the
 * file itself becomes /user/dir2) and verifies across two namenode
 * restarts that the file is readable at the new path.
 */
@Test public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
  Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1);
  System.out.println("Test 4************************************");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    final int nnport=cluster.getNameNodePort();
    // Create /user/dir1/file1, write and hflush, keeping the stream open.
    Path dir1=new Path("/user/dir1");
    Path file1=new Path(dir1,"file1");
    FSDataOutputStream stm1=TestFileCreation.createFile(fs,file1,1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    // dir2 is NOT created first: the open file is renamed onto that path.
    Path dir2=new Path("/user/dir2");
    fs.rename(file1,dir2);
    // Restart the cluster twice, letting client connections idle out.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    }
    catch ( InterruptedException e) {
      // preserve the interrupt status instead of swallowing it
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    }
    catch ( InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // The file is now /user/dir2 itself and must be fully readable.
    Path newfile=new Path("/user","dir2");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs,newfile);
  }
  finally {
    // fs is null if startup failed before getFileSystem(); guard so the
    // cleanup does not throw an NPE that masks the original failure.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a file with replication 4, then -- with the cluster down --
 * deletes one on-disk replica and corrupts two others. After a restart
 * with a short pending-replication timeout, the namenode must bring the
 * block back to full replication (pending replication retry).
 */
@Test public void testPendingReplicationRetry() throws IOException {
MiniDFSCluster cluster=null;
int numDataNodes=4;
String testFile="/replication-test-file";
Path testPath=new Path(testFile);
// 1 KB of '1' bytes; a 25-byte slice of it is later reused as the
// corruption payload
byte buffer[]=new byte[1024];
for (int i=0; i < buffer.length; i++) {
buffer[i]='1';
}
try {
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes));
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
OutputStream out=cluster.getFileSystem().create(testPath);
out.write(buffer);
out.close();
waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1);
// remember the file's only block, then shut the cluster down so the
// replica files on disk can be tampered with directly
ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(testFile,0,Long.MAX_VALUE).get(0).getBlock();
cluster.shutdown();
cluster=null;
// corruption payload: the first 25 bytes become '0'
for (int i=0; i < 25; i++) {
buffer[i]='0';
}
// delete the first replica found and corrupt the other two on
// datanodes 0..2; the fourth replica stays intact
int fileCount=0;
for (int dnIndex=0; dnIndex < 3; dnIndex++) {
File blockFile=MiniDFSCluster.getBlockFile(dnIndex,block);
LOG.info("Checking for file " + blockFile);
if (blockFile != null && blockFile.exists()) {
if (fileCount == 0) {
LOG.info("Deleting file " + blockFile);
assertTrue(blockFile.delete());
}
else {
// overwrite 25 bytes one third into the block file
LOG.info("Corrupting file " + blockFile);
long len=blockFile.length();
assertTrue(len > 50);
RandomAccessFile blockOut=new RandomAccessFile(blockFile,"rw");
try {
blockOut.seek(len / 3);
blockOut.write(buffer,0,25);
}
finally {
blockOut.close();
}
}
fileCount++;
}
}
// all three inspected datanodes must have held a replica
assertEquals(3,fileCount);
LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
// restart without formatting, with a 2s pending-replication timeout so
// re-replication retries happen quickly, and extra datanodes available
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes));
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
conf.set("dfs.datanode.block.write.timeout.sec",Integer.toString(5));
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.75f");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
cluster.waitActive();
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
// block until the file is back at full replication
waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the only replica of a file, then raises the replication factor
 * so the corrupt replica is transferred. The namenode must mark the block
 * as corrupt, and the corrupt replica must not be counted as a live
 * location (exactly one live replica remains).
 */
@Test public void testBadBlockReportOnTransfer() throws Exception {
  Configuration conf=new HdfsConfiguration();
  FileSystem fs=null;
  DFSClient dfsClient=null;
  LocatedBlocks blocks=null;
  int replicaCount=0;
  short replFactor=1;
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  // try/finally so the cluster is shut down even when an assertion fails
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
    // Create a single-replica file and corrupt its block on disk.
    Path file1=new Path("/tmp/testBadBlockReportOnTransfer/file1");
    DFSTestUtil.createFile(fs,file1,1024,replFactor,0);
    DFSTestUtil.waitReplication(fs,file1,replFactor);
    ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
    int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
    assertEquals("Corrupted too few blocks",replFactor,blockFilesCorrupted);
    // Raising the replication factor forces a transfer of the corrupt
    // replica, which should get the block marked as corrupt.
    replFactor=2;
    fs.setReplication(file1,replFactor);
    blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
    while (!blocks.get(0).isCorrupt()) {
      try {
        LOG.info("Waiting until block is marked as corrupt...");
        Thread.sleep(1000);
      }
      catch ( InterruptedException ie) {
        // preserve the interrupt status; keep polling
        Thread.currentThread().interrupt();
      }
      blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
    }
    // The corrupt replica must not be reported as a live location.
    replicaCount=blocks.get(0).getLocations().length;
    assertEquals(1,replicaCount);
  }
  finally {
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * After preparing a rolling upgrade and rolling the edits on the active
 * namenode, the standby must produce a new checkpoint image within a few
 * seconds.
 */
@Test(timeout=300000) public void testCheckpoint() throws IOException, InterruptedException {
  final Configuration conf=new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,1);
  MiniQJMHACluster cluster=null;
  final Path foo=new Path("/foo");
  try {
    cluster=new MiniQJMHACluster.Builder(conf).build();
    final MiniDFSCluster dfsCluster=cluster.getDfsCluster();
    dfsCluster.waitActive();
    dfsCluster.transitionToActive(0);
    final DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
    // Start the rolling upgrade and wait until preparation completes.
    final RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    Assert.assertTrue(info.isStarted());
    queryForPreparation(dfs);
    dfs.mkdirs(foo);
    final long txid=dfs.rollEdits();
    Assert.assertTrue(txid > 0);
    // Poll the standby (up to 4 times, 1s apart) for a checkpoint image
    // covering the rolled transactions.
    for (int attempt=1; attempt < 5; attempt++) {
      final NNStorage storage=dfsCluster.getNamesystem(1).getFSImage().getStorage();
      if (storage.getFsImageName(txid - 1) != null) {
        return;
      }
      Thread.sleep(1000);
    }
    Assert.fail("new checkpoint does not exist");
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * QUERY must report that no rollback images exist while the standby
 * namenode is down, and report them created once the standby is back and
 * preparation has finished.
 */
@Test(timeout=300000) public void testQuery() throws Exception {
  final Configuration conf=new Configuration();
  MiniQJMHACluster qjmCluster=null;
  try {
    qjmCluster=new MiniQJMHACluster.Builder(conf).build();
    final MiniDFSCluster dfsCluster=qjmCluster.getDfsCluster();
    dfsCluster.waitActive();
    dfsCluster.transitionToActive(0);
    final DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
    // With the standby down, PREPARE starts but no rollback image appears.
    dfsCluster.shutdownNameNode(1);
    RollingUpgradeInfo upgradeInfo=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    Assert.assertTrue(upgradeInfo.isStarted());
    upgradeInfo=dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
    Assert.assertFalse(upgradeInfo.createdRollbackImages());
    // Once the standby returns, preparation completes and the rollback
    // image shows up on the active namenode.
    dfsCluster.restartNameNode(1);
    queryForPreparation(dfs);
    Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
  }
  finally {
    if (qjmCluster != null) {
      qjmCluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test DFSAdmin Upgrade Command.
 */
@Test public void testDFSAdminRollingUpgradeCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
final Path baz=new Path("/baz");
{
final DistributedFileSystem dfs=cluster.getFileSystem();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
dfs.mkdirs(foo);
// an invalid sub-command must fail (second runCmd arg = expect success)
runCmd(dfsadmin,false,"-rollingUpgrade","abc");
// bare -rollingUpgrade (query) succeeds
runCmd(dfsadmin,true,"-rollingUpgrade");
// prepare is issued while the namenode is in safe mode
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
runCmd(dfsadmin,true,"-rollingUpgrade","prepare");
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
runCmd(dfsadmin,true,"-rollingUpgrade","query");
// /bar is created during the upgrade, /baz after finalization
dfs.mkdirs(bar);
runCmd(dfsadmin,true,"-rollingUpgrade","finalize");
dfs.mkdirs(baz);
runCmd(dfsadmin,true,"-rollingUpgrade");
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
// checkpoint so the restarted namenode loads the saved image
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
{
final DistributedFileSystem dfs=cluster.getFileSystem();
// all three directories must survive the restart
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
}
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a rolling upgrade against a quorum journal: NN1 prepares the
 * upgrade, then NN2 (started from a copy of NN1's image plus the shared
 * journal edits) continues it, survives restarts, rejects a "-upgrade"
 * startup while the rolling upgrade is in progress, finalizes it, and
 * restarts with the regular startup option.
 */
@Test(timeout=30000) public void testRollingUpgradeWithQJM() throws Exception {
String nnDirPrefix=MiniDFSCluster.getBaseDirectory() + "/nn/";
final File nn1Dir=new File(nnDirPrefix + "image1");
final File nn2Dir=new File(nnDirPrefix + "image2");
LOG.info("nn1Dir=" + nn1Dir);
LOG.info("nn2Dir=" + nn2Dir);
final Configuration conf=new HdfsConfiguration();
final MiniJournalCluster mjc=new MiniJournalCluster.Builder(conf).build();
setConf(conf,nn1Dir,mjc);
{
// format a fresh namenode image into nn1Dir, then shut the cluster down
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
cluster.shutdown();
}
MiniDFSCluster cluster2=null;
try {
// nn2Dir starts as an exact copy of nn1Dir
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
final Path baz=new Path("/baz");
final RollingUpgradeInfo info1;
{
// on NN1: create /foo, prepare the rolling upgrade (in safe mode),
// create /bar during the upgrade, then shut down
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.mkdirs(foo);
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
info1=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
LOG.info("START\n" + info1);
Assert.assertEquals(info1,dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs.mkdirs(bar);
cluster.shutdown();
}
// start NN2 from the copied image; it must catch up via the journal
final Configuration conf2=setConf(new Configuration(),nn2Dir,mjc);
cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
final DistributedFileSystem dfs2=cluster2.getFileSystem();
// NN2 sees everything NN1 did, including the in-progress upgrade
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertFalse(dfs2.exists(baz));
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs2.mkdirs(baz);
// upgrade state and namespace must survive a restart
LOG.info("RESTART cluster 2");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
// restarting with "-upgrade" during a rolling upgrade is refused
try {
cluster2.restartNameNode("-upgrade");
}
catch ( IOException e) {
LOG.info("The exception is expected.",e);
}
LOG.info("RESTART cluster 2 again");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
// finalize: the finalized info keeps the original start time
final RollingUpgradeInfo finalize=dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE);
LOG.info("FINALIZE: " + finalize);
Assert.assertEquals(info1.getStartTime(),finalize.getStartTime());
// a regular restart after finalization keeps the whole namespace
LOG.info("RESTART cluster 2 with regular startup option");
cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster2.restartNameNode();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
}
finally {
if (cluster2 != null) cluster2.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Finalizing a rolling upgrade must delete the rollback image while
 * keeping the namespace changes made before and during the upgrade.
 */
@Test(timeout=300000) public void testFinalize() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  MiniQJMHACluster qjmCluster=null;
  final Path foo=new Path("/foo");
  final Path bar=new Path("/bar");
  try {
    qjmCluster=new MiniQJMHACluster.Builder(conf).build();
    final MiniDFSCluster dfsCluster=qjmCluster.getDfsCluster();
    dfsCluster.waitActive();
    // Let NN1 tail edits quickly so the rollback image gets created.
    dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
    dfsCluster.restartNameNode(1);
    dfsCluster.transitionToActive(0);
    final DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
    dfs.mkdirs(foo);
    final FSImage fsimage=dfsCluster.getNamesystem(0).getFSImage();
    RollingUpgradeInfo upgradeInfo=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    Assert.assertTrue(upgradeInfo.isStarted());
    // /bar is created while the upgrade is in progress.
    dfs.mkdirs(bar);
    queryForPreparation(dfs);
    // Preparation produces the rollback image ...
    Assert.assertTrue(fsimage.hasRollbackFSImage());
    upgradeInfo=dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
    Assert.assertTrue(upgradeInfo.isFinalized());
    // ... and finalizing removes it again without losing data.
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(fsimage.hasRollbackFSImage());
    dfsCluster.restartNameNode(0);
  }
  finally {
    if (qjmCluster != null) {
      qjmCluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks the DFSAdmin datanode upgrade-control commands: -getDatanodeInfo
 * succeeds while the datanode is alive, -shutdownDatanode &lt;addr&gt; upgrade
 * takes it down, after which the same info query returns an error.
 */
@Test public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DFSAdmin dfsadmin=new DFSAdmin(conf);
    final DataNode dn=cluster.getDataNodes().get(0);
    final String ipcAddr=dn.getDatanodeId().getIpcAddr(false);
    // While the datanode is up, the info query succeeds (exit code 0).
    final String[] infoArgs={"-getDatanodeInfo",ipcAddr};
    Assert.assertEquals(0,dfsadmin.run(infoArgs));
    // Ask the datanode to shut down for an upgrade.
    final String[] shutdownArgs={"-shutdownDatanode",ipcAddr,"upgrade"};
    Assert.assertEquals(0,dfsadmin.run(shutdownArgs));
    // Give the datanode a moment to exit.
    Thread.sleep(2000);
    Assert.assertFalse("DataNode should exit",dn.isDatanodeUp());
    // With the datanode gone, the same info query now fails (-1).
    Assert.assertEquals(-1,dfsadmin.run(infoArgs));
  }
  finally {
    if (cluster != null) cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testDowngrade() throws Exception {
// Rolling-upgrade DOWNGRADE on an HA (QJM) cluster: restarting the NN with
// "-rollingUpgrade downgrade" must keep ALL data (including data written
// during the upgrade window) and must not retain a rollback fsimage.
final Configuration conf=new HdfsConfiguration();
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
// Fast edit tailing so the standby keeps up with the upgrade.
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
// /foo before PREPARE, /bar during the upgrade window.
dfs.mkdirs(foo);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
// Wait for the rollback image, then release the client before restarting.
TestRollingUpgrade.queryForPreparation(dfs);
dfs.close();
dfsCluster.restartNameNode(0,true,"-rollingUpgrade","downgrade");
// Downgrade must discard the rollback fsimage.
Assert.assertFalse(dfsCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs=dfsCluster.getFileSystem(0);
// Unlike rollback, downgrade preserves edits made during the upgrade.
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rolling-upgrade ROLLBACK on a non-HA cluster whose edits live on a QJM
 * quorum: data created before PREPARE survives the rollback, data created
 * during the upgrade window is discarded, and every JournalNode's storage
 * directory is left in the expected post-rollback state.
 */
@Test public void testRollbackWithQJM() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniJournalCluster mjc=null;
MiniDFSCluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
mjc=new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI(JOURNAL_ID).toString());
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem dfs=cluster.getFileSystem();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
// /foo exists before the upgrade; /bar is created during the window.
dfs.mkdirs(foo);
// PREPARE requires safe mode for the non-HA case.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
dfs.mkdirs(bar);
dfs.close();
cluster.restartNameNode("-rollingUpgrade","rollback");
dfs=cluster.getFileSystem();
// Rollback keeps pre-upgrade data and discards in-window data.
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
for (int i=0; i < NUM_JOURNAL_NODES; i++) {
// Fix: check each JournalNode's storage in turn; the loop index was
// previously ignored, so only JN 0 was ever verified.
File dir=mjc.getCurrentDir(i,JOURNAL_ID);
checkJNStorage(dir,4,7);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
if (mjc != null) {
mjc.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test rollback scenarios where the StandbyNameNode does checkpoints during
 * rolling upgrade: both NNs must hold a rollback fsimage after PREPARE,
 * rollback must restore pre-upgrade state on NN and JournalNodes, and a new
 * rolling upgrade must be startable afterwards.
 */
@Test public void testRollbackWithHAQJM() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
// Fast edit tailing so the standby observes the upgrade quickly.
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
// /foo exists before the upgrade; /bar is created during the window.
dfs.mkdirs(foo);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
// Fix: wait for PREPARE to finish BEFORE closing the client. The
// original code closed dfs first and then issued the query RPC on the
// closed filesystem (sibling testDowngrade uses the correct order).
TestRollingUpgrade.queryForPreparation(dfs);
dfs.close();
// Both the active and the standby must have a rollback fsimage.
Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage().hasRollbackFSImage());
Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage().hasRollbackFSImage());
dfsCluster.restartNameNode(0,true,"-rollingUpgrade","rollback");
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs=dfsCluster.getFileSystem(0);
// Rollback keeps pre-upgrade data and discards in-window data.
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
NNStorage storage=dfsCluster.getNamesystem(0).getFSImage().getStorage();
checkNNStorage(storage,4,7);
for (int i=0; i < NUM_JOURNAL_NODES; i++) {
// Fix: verify every JournalNode, not JN 0 repeatedly (loop index was
// previously unused).
File dir=cluster.getJournalCluster().getCurrentDir(i,MiniQJMHACluster.NAMESERVICE);
checkJNStorage(dir,5,7);
}
// A fresh rolling upgrade must be startable after the rollback.
dfsCluster.restartNameNode(0);
dfsCluster.transitionToActive(0);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476).
 * Creates a 15-block file, stops all DNs, restarts the NN with a low
 * repl-queue threshold, then verifies the queues are populated once the
 * first block report crosses that threshold.
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception {
LOG.info("Starting testInitializeReplQueuesEarly");
// Spread blocks across DNs instead of preferring the local writer node.
BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(),false);
cluster.startDataNodes(conf,2,true,StartupOption.REGULAR,null);
cluster.waitActive();
LOG.info("Creating files");
// 15 blocks with replication 1 so each stopped DN takes blocks offline.
DFSTestUtil.createFile(fs,TEST_PATH,15 * BLOCK_SIZE,(short)1,1L);
LOG.info("Stopping all DataNodes");
List dnprops=Lists.newLinkedList();
// Index 0 each time: stopping a DN shifts the remaining ones down.
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
// Repl queues should initialize after only 1 of 15 blocks is reported.
cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,1f / 15f);
LOG.info("Restarting NameNode");
cluster.restartNameNode();
final NameNode nn=cluster.getNameNode();
String status=nn.getNamesystem().getSafemode();
// Exact-match on the safe-mode tip message with zero reported blocks.
assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. "+ "Safe mode will be turned off automatically once the thresholds "+ "have been reached.",status);
assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed",NameNodeAdapter.safeModeInitializedReplQueues(nn));
LOG.info("Restarting one DataNode");
cluster.restartDataNode(dnprops.remove(0));
// Wait until the restarted DN has sent a full set of storage block reports.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return getLongCounter("StorageBlockReportOps",getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
}
}
,10,10000);
// One DN's report makes some but not all of the 15 blocks safe...
final int safe=NameNodeAdapter.getSafeModeSafeBlocks(nn);
assertTrue("Expected first block report to make some blocks safe.",safe > 0);
assertTrue("Did not expect first block report to make all blocks safe.",safe < 15);
// ...which is enough (>= 1/15) to have initialized the repl queues.
assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
long underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
// Poll until the under-replicated count converges to the unsafe remainder.
while (underReplicatedBlocks != (15 - safe)) {
LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual="+ underReplicatedBlocks);
Thread.sleep(100);
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
}
// Restore the DataNodes for any subsequent tests sharing the cluster.
cluster.restartDataNodes();
}
InternalCallVerifier BooleanVerifier
/**
 * This test verifies that if SafeMode is manually entered, name-node does not
 * come out of safe mode even after the startup safe mode conditions are met.
 *
 * - Start cluster with 1 data-node.
 * - Create 2 files with replication 1.
 * - Re-start cluster with 0 data-nodes.
 * Name-node should stay in automatic safe-mode.
 * - Enter safe mode manually.
 * - Start the data-node.
 * - Wait longer than dfs.namenode.safemode.extension and
 * verify that the name-node is still in safe mode.
 *
 * @throws IOException
 */
@Test public void testManualSafeMode() throws IOException {
fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testManualSafeMode/file1");
Path file2=new Path("/tmp/testManualSafeMode/file2");
// Two replication-1 files whose blocks live only on the single DN.
DFSTestUtil.createFile(fs,file1,1000,(short)1,0);
DFSTestUtil.createFile(fs,file2,1000,(short)1,0);
fs.close();
cluster.shutdown();
// Restart with zero DNs: the NN enters automatic (startup) safe mode.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
cluster.waitActive();
dfs=cluster.getFileSystem();
assertTrue("No datanode is started. Should be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
// Switch to MANUAL safe mode, then satisfy the startup conditions by
// starting the DN that holds all the blocks.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
// Wait past the safemode extension window.
try {
Thread.sleep(2000);
}
catch ( InterruptedException ignored) {
}
// Manual safe mode must not auto-exit even though thresholds are met.
assertTrue("should still be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
// Explicit LEAVE is the only way out; it must report safe mode off.
assertFalse("should not be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Run various fs operations while the NN is in safe mode,
 * assert that they are either allowed or fail as expected.
 * Mutating operations (quota, perms, owner, repl, append, delete, rename,
 * times, ACLs, xattrs) must be rejected; read-only operations (read file,
 * getAclStatus, access checks) must still work.
 */
@Test public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
final Path file1=new Path("/file1");
// Start outside safe mode, create the test file, then enter manually.
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
DFSTestUtil.createFile(fs,file1,1024,(short)1,0);
assertTrue("Could not enter SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
// Each runFsFun below asserts that the wrapped mutation FAILS in safe mode.
runFsFun("Set quota while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
((DistributedFileSystem)fs).setQuota(file1,1,1);
}
}
);
runFsFun("Set perm while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setPermission(file1,FsPermission.getDefault());
}
}
);
runFsFun("Set owner while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setOwner(file1,"user","group");
}
}
);
runFsFun("Set repl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setReplication(file1,(short)1);
}
}
);
runFsFun("Append file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
DFSTestUtil.appendFile(fs,file1,"new bytes");
}
}
);
runFsFun("Delete file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.delete(file1,false);
}
}
);
runFsFun("Rename file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.rename(file1,new Path("file2"));
}
}
);
runFsFun("Set time while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setTimes(file1,0,0);
}
}
);
// ACL and xattr mutations are equally disallowed in safe mode.
runFsFun("modifyAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.modifyAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeDefaultAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}
}
);
runFsFun("removeAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAcl(file1);
}
}
);
runFsFun("setAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setAcl(file1,Lists.newArrayList());
}
}
);
runFsFun("setXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setXAttr(file1,"user.a1",null);
}
}
);
runFsFun("removeXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeXAttr(file1,"user.a1");
}
}
);
// Read-only operations must still succeed while in safe mode.
try {
DFSTestUtil.readFile(fs,file1);
}
catch ( IOException ioe) {
fail("Set times failed while in SM");
}
try {
fs.getAclStatus(file1);
}
catch ( IOException ioe) {
fail("getAclStatus failed while in SM");
}
// access() is a permission check, not a mutation: READ is allowed for
// userX; WRITE must be denied by permissions (not by safe mode).
UserGroupInformation ugiX=UserGroupInformation.createRemoteUser("userX");
FileSystem myfs=ugiX.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
}
);
myfs.access(file1,FsAction.READ);
try {
myfs.access(file1,FsAction.WRITE);
fail("The access call should have failed.");
}
catch ( AccessControlException e) {
// expected: userX has no write permission on /file1
}
// Leave safe mode; setSafeMode returns the new (off) state.
assertFalse("Could not leave SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * getBlockLocations must keep working while safe mode is entered manually,
 * but must fail with SafeModeException once the NN restarts with no
 * DataNodes (startup safe mode, zero reported block locations).
 */
@Test public void testSafeModeWhenZeroBlockLocations() throws IOException {
try {
Path file1=new Path("/tmp/testManualSafeMode/file1");
Path file2=new Path("/tmp/testManualSafeMode/file2");
System.out.println("Created file1 and file2.");
DFSTestUtil.createFile(fs,file1,1000,(short)1,0);
DFSTestUtil.createFile(fs,file2,2000,(short)1,0);
checkGetBlockLocationsWorks(fs,file1);
NameNode namenode=cluster.getNameNode();
// Manually entered safe mode must not break getBlockLocations.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
assertTrue("should still be in SafeMode",namenode.isInSafeMode());
checkGetBlockLocationsWorks(fs,file1);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("should not be in SafeMode",namenode.isInSafeMode());
// Restart the NN with zero DataNodes: it sits in startup safe mode
// because no block locations can be reported.
cluster.shutdownDataNodes();
cluster.shutdownNameNode(0);
cluster.restartNameNode();
cluster.waitActive();
System.out.println("Restarted cluster with just the NameNode");
namenode=cluster.getNameNode();
assertTrue("No datanode is started. Should be in SafeMode",namenode.isInSafeMode());
FileStatus stat=fs.getFileStatus(file1);
try {
fs.getFileBlockLocations(stat,0,1000);
// Fix: use fail() instead of the assertTrue(msg, false) anti-idiom.
fail("Should have got safemode exception");
}
catch ( SafeModeException e) {
// expected: NN is in safe mode with zero reported block locations
}
catch ( RemoteException re) {
// Over RPC the exception may arrive wrapped in a RemoteException.
if (!re.getClassName().equals(SafeModeException.class.getName())) fail("Should have got safemode exception");
}
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("Should not be in safemode",namenode.isInSafeMode());
checkGetBlockLocationsWorks(fs,file1);
}
finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
 * is set to a number greater than the number of live datanodes.
 * Also checks the safe-mode tip message text, and that safe mode clears
 * (empty tip) once one DataNode comes up.
 */
@Test public void testDatanodeThreshold() throws IOException {
cluster.shutdown();
Configuration conf=cluster.getConfiguration(0);
// No extension window, and require at least 1 live DN to leave safe mode.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,1);
cluster.restartNameNode();
fs=cluster.getFileSystem();
// With zero DNs running, the tip must explain the datanode shortfall.
String tipMsg=cluster.getNamesystem().getSafemode();
assertTrue("Safemode tip message doesn't look right: " + tipMsg,tipMsg.contains("The number of live datanodes 0 needs an additional " + "1 live datanodes to reach the minimum number 1.\n" + "Safe mode will be turned off automatically"));
// Starting one DN satisfies the minimum and safe mode should turn off.
cluster.startDataNodes(conf,1,true,null,null);
try {
Thread.sleep(1000);
}
catch ( InterruptedException ignored) {
}
// Empty safemode string means safe mode is off.
assertEquals("",cluster.getNamesystem().getSafemode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests mod time change at close in DFS.
 * Writes a file, records its mtime before close, closes the stream, and
 * verifies that closing updated the modification time.
 */
@Test public void testTimesAtClose() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
int replicas=1;
// Short idle/heartbeat intervals keep the mini-cluster responsive.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
// Sanity: all DNs must be live before we measure timestamps.
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
assertTrue(fileSys instanceof DistributedFileSystem);
try {
Path file1=new Path("/simple.dat");
FSDataOutputStream stm=writeFile(fileSys,file1,replicas);
System.out.println("Created and wrote file simple.dat");
FileStatus statBeforeClose=fileSys.getFileStatus(file1);
long mtimeBeforeClose=statBeforeClose.getModificationTime();
String mdateBeforeClose=dateForm.format(new Date(mtimeBeforeClose));
System.out.println("mtime on " + file1 + " before close is "+ mdateBeforeClose+ " ("+ mtimeBeforeClose+ ")");
assertTrue(mtimeBeforeClose != 0);
// Closing the stream must bump the file's modification time.
stm.close();
System.out.println("Closed file.");
FileStatus statAfterClose=fileSys.getFileStatus(file1);
long mtimeAfterClose=statAfterClose.getModificationTime();
String mdateAfterClose=dateForm.format(new Date(mtimeAfterClose));
System.out.println("mtime on " + file1 + " after close is "+ mdateAfterClose+ " ("+ mtimeAfterClose+ ")");
assertTrue(mtimeAfterClose != 0);
assertTrue(mtimeBeforeClose != mtimeAfterClose);
cleanupFile(fileSys,file1);
}
catch ( IOException e) {
// Dump the datanode report to aid debugging before rethrowing.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests mod &amp; access time in DFS: atime/mtime are set on create, setTimes
 * with -1 leaves the other field untouched, setTimes works on directories,
 * fails for a non-existent path, and times persist across an NN restart.
 */
@Test public void testTimes() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
// Short idle/heartbeat intervals keep the mini-cluster responsive.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Remember the NN port so the cluster can be restarted on the same one.
final int nnport=cluster.getNameNodePort();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
int replicas=1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1=new Path("testdir1");
Path file1=new Path(dir1,"test1.dat");
FSDataOutputStream stm=writeFile(fileSys,file1,replicas);
FileStatus stat=fileSys.getFileStatus(file1);
long atimeBeforeClose=stat.getAccessTime();
String adate=dateForm.format(new Date(atimeBeforeClose));
System.out.println("atime on " + file1 + " before close is "+ adate+ " ("+ atimeBeforeClose+ ")");
assertTrue(atimeBeforeClose != 0);
stm.close();
stat=fileSys.getFileStatus(file1);
long atime1=stat.getAccessTime();
long mtime1=stat.getModificationTime();
adate=dateForm.format(new Date(atime1));
String mdate=dateForm.format(new Date(mtime1));
System.out.println("atime on " + file1 + " is "+ adate+ " ("+ atime1+ ")");
System.out.println("mtime on " + file1 + " is "+ mdate+ " ("+ mtime1+ ")");
assertTrue(atime1 != 0);
// Directories do not track access time.
stat=fileSys.getFileStatus(dir1);
long mdir1=stat.getAccessTime();
assertEquals(0,mdir1);
// setTimes(file, -1, atime): only the access time should change.
long atime2=atime1 - (24L * 3600L * 1000L);
fileSys.setTimes(file1,-1,atime2);
stat=fileSys.getFileStatus(file1);
long atime3=stat.getAccessTime();
String adate3=dateForm.format(new Date(atime3));
System.out.println("new atime on " + file1 + " is "+ adate3+ " ("+ atime3+ ")");
assertEquals(atime2,atime3);
assertEquals(mtime1,stat.getModificationTime());
// setTimes(file, mtime, -1): only the modification time should change.
long mtime2=mtime1 - (3600L * 1000L);
fileSys.setTimes(file1,mtime2,-1);
stat=fileSys.getFileStatus(file1);
long mtime3=stat.getModificationTime();
String mdate3=dateForm.format(new Date(mtime3));
System.out.println("new mtime on " + file1 + " is "+ mdate3+ " ("+ mtime3+ ")");
assertEquals(atime2,stat.getAccessTime());
assertEquals(mtime2,mtime3);
// setTimes must also work on directories.
long mtime4=Time.now() - (3600L * 1000L);
long atime4=Time.now();
fileSys.setTimes(dir1,mtime4,atime4);
stat=fileSys.getFileStatus(dir1);
assertEquals("Not matching the modification times",mtime4,stat.getModificationTime());
assertEquals("Not matching the access times",atime4,stat.getAccessTime());
// setTimes on a missing path must raise FileNotFoundException.
Path nonExistingDir=new Path(dir1,"/nonExistingDir/");
try {
fileSys.setTimes(nonExistingDir,mtime4,atime4);
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
}
// Restart the cluster (same NN port, no reformat) and verify the times
// set above survived the restart.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
System.out.println("Verifying times after cluster restart");
stat=fileSys.getFileStatus(file1);
assertEquals(atime2,stat.getAccessTime());
assertEquals(mtime3,stat.getModificationTime());
cleanupFile(fileSys,file1);
cleanupFile(fileSys,dir1);
}
catch ( IOException e) {
// Dump the datanode report to aid debugging before rethrowing.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testEviction() throws IOException {
// DFSClientCache with capacity 2: repeated lookups for the same user reuse
// one client; adding users past capacity evicts (and closes) the oldest.
NfsConfiguration conf=new NfsConfiguration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY,"hdfs://localhost");
final int MAX_CACHE_SIZE=2;
DFSClientCache cache=new DFSClientCache(conf,MAX_CACHE_SIZE);
DFSClient c1=cache.getDfsClient("test1");
assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
// Same user must get the same cached client instance, still open.
assertEquals(c1,cache.getDfsClient("test1"));
assertFalse(isDfsClientClose(c1));
// NOTE(review): adding a second user here already triggers eviction of
// c1 — presumably the cache evicts at size >= MAX_CACHE_SIZE; confirm
// against DFSClientCache's sizing policy.
cache.getDfsClient("test2");
assertTrue(isDfsClientClose(c1));
assertEquals(MAX_CACHE_SIZE - 1,cache.clientCache.size());
}
InternalCallVerifier BooleanVerifier
/**
 * A configured NFS export point must be the single export served by the
 * mount daemon.
 */
@Test public void testExportPoint() throws IOException {
NfsConfiguration config=new NfsConfiguration();
MiniDFSCluster cluster=null;
String exportPoint="/myexport1";
config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,exportPoint);
// Port 0: let the OS pick free mountd/nfs3 ports for the test.
config.setInt("nfs3.mountd.port",0);
config.setInt("nfs3.server.port",0);
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
final Nfs3 nfsServer=new Nfs3(config);
nfsServer.startServiceInternal(false);
Mountd mountd=nfsServer.getMountd();
RpcProgramMountd rpcMount=(RpcProgramMountd)mountd.getRpcProgram();
// Use assertEquals for informative failure messages (was
// assertTrue(size() == 1) / assertTrue(equals(...))).
assertEquals(1,rpcMount.getExports().size());
String exportInMountd=rpcMount.getExports().get(0);
assertEquals(exportPoint,exportInMountd);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ReverseComparatorOnMin contract: a range compares equal to itself and to
 * an identical copy, and ranges order by DESCENDING min offset (larger min
 * sorts first).
 */
@Test public void testCompare() throws IOException {
final OffsetRange low=new OffsetRange(0,1);
final OffsetRange middle=new OffsetRange(1,3);
final OffsetRange middleCopy=new OffsetRange(1,3);
final OffsetRange high=new OffsetRange(3,4);
// Reflexive and equal-by-value cases both compare as 0.
assertEquals(0,OffsetRange.ReverseComparatorOnMin.compare(middle,middle));
assertEquals(0,OffsetRange.ReverseComparatorOnMin.compare(middle,middleCopy));
// Reverse ordering: middle (min=1) sorts before low (min=0)...
assertTrue(OffsetRange.ReverseComparatorOnMin.compare(middle,low) < 0);
// ...and after high (min=3).
assertTrue(OffsetRange.ReverseComparatorOnMin.compare(middle,high) > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testScan() throws IOException, InterruptedException {
// OpenFileCtxCache.scan: a scan with a given timeout must evict contexts
// that have been idle past the timeout, and inactive contexts regardless.
NfsConfiguration conf=new NfsConfiguration();
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,2);
DFSClient dfsClient=Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr=new Nfs3FileAttributes();
HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long)0);
OpenFileCtx context1=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context2=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context3=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context4=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtxCache cache=new OpenFileCtxCache(conf,10 * 60 * 100);
boolean ret=cache.put(new FileHandle(1),context1);
assertTrue(ret);
ret=cache.put(new FileHandle(2),context2);
assertTrue(ret);
// Let both contexts idle past the minimum stream timeout, then scan:
// everything should be evicted.
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
assertTrue(cache.size() == 0);
ret=cache.put(new FileHandle(3),context3);
assertTrue(ret);
ret=cache.put(new FileHandle(4),context4);
assertTrue(ret);
// With the (large) default timeout, only the explicitly-inactive
// context3 should be removed by the scan; context4 survives.
context3.setActiveStatusForTest(false);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
assertTrue(cache.size() == 1);
assertTrue(cache.get(new FileHandle(3)) == null);
assertTrue(cache.get(new FileHandle(4)) != null);
}
InternalCallVerifier BooleanVerifier
/**
 * OpenFileCtxCache eviction policy at capacity 2: puts past capacity fail
 * while all entries are active and fresh; the oldest evictable entry is
 * replaced once past the minimum stream timeout; entries with pending
 * writes or pending commits are never evicted.
 */
@Test public void testEviction() throws IOException, InterruptedException {
NfsConfiguration conf=new NfsConfiguration();
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,2);
DFSClient dfsClient=Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr=new Nfs3FileAttributes();
HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long)0);
OpenFileCtx context1=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context2=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context3=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context4=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtx context5=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
OpenFileCtxCache cache=new OpenFileCtxCache(conf,10 * 60 * 100);
boolean ret=cache.put(new FileHandle(1),context1);
assertTrue(ret);
// Make context1 measurably older than context2.
Thread.sleep(1000);
ret=cache.put(new FileHandle(2),context2);
assertTrue(ret);
// Cache is full of fresh, active contexts: nothing is evictable yet.
ret=cache.put(new FileHandle(3),context3);
assertFalse(ret);
assertEquals(2,cache.size());
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
assertEquals(2,cache.size());
// Past the minimum timeout, the oldest entry (context1) is evicted.
ret=cache.put(new FileHandle(3),context3);
assertTrue(ret);
assertEquals(2,cache.size());
assertTrue(cache.get(new FileHandle(1)) == null);
// An inactive context is evictable immediately.
context3.setActiveStatusForTest(false);
ret=cache.put(new FileHandle(4),context4);
assertTrue(ret);
// Pin both remaining contexts: pending writes / pending commits block
// eviction, so a further put must fail even after the timeout.
context2.getPendingWritesForTest().put(new OffsetRange(0,100),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
// Long.valueOf instead of the deprecated Long(long) constructor.
context4.getPendingCommitsForTest().put(Long.valueOf(100),new CommitCtx(0,null,0,attr));
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
ret=cache.put(new FileHandle(5),context5);
assertFalse(ret);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testReaddirPlus() throws IOException {
// READDIRPLUS3: a full listing of testdir, resuming a listing from a
// cookie (a prior entry's file id), and resuming after the cookie entry
// was deleted (server should restart conservatively and return more).
HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
long dirId=status.getFileId();
XDR xdr_req=new XDR();
FileHandle handle=new FileHandle(dirId);
handle.serialize(xdr_req);
// cookie=0, cookieverf=0: list from the beginning.
xdr_req.writeLongAsHyper(0);
xdr_req.writeLongAsHyper(0);
// dircount=100, maxcount=1000.
xdr_req.writeInt(100);
xdr_req.writeInt(1000);
READDIRPLUS3Response responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
List direntPlus=responsePlus.getDirListPlus().getEntries();
// Full listing of testdir yields 5 entries.
assertTrue(direntPlus.size() == 5);
status=nn.getRpcServer().getFileInfo(testdir + "/f2");
long f2Id=status.getFileId();
// Resume the listing with f2's id as the cookie: only what follows f2.
xdr_req=new XDR();
handle=new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(f2Id);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(100);
xdr_req.writeInt(1000);
responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
direntPlus=responsePlus.getDirListPlus().getEntries();
assertTrue(direntPlus.size() == 1);
EntryPlus3 entryPlus=direntPlus.get(0);
assertTrue(entryPlus.getName().equals("f3"));
// Delete the cookie entry and reissue the same request: the server can
// no longer find the cookie and returns the remaining entries.
hdfs.delete(new Path(testdir + "/f2"),false);
responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
direntPlus=responsePlus.getDirListPlus().getEntries();
assertTrue(direntPlus.size() == 2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testReaddirBasic() throws IOException {
// READDIR3 counterpart of testReaddirPlus: full listing, cookie-based
// resumption, and resumption after the cookie entry has been deleted.
HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
long dirId=status.getFileId();
XDR xdr_req=new XDR();
FileHandle handle=new FileHandle(dirId);
handle.serialize(xdr_req);
// cookie=0, cookieverf=0: list from the beginning; count=100.
xdr_req.writeLongAsHyper(0);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(100);
READDIR3Response response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
List dirents=response.getDirList().getEntries();
// Full listing of testdir yields 5 entries.
assertTrue(dirents.size() == 5);
status=nn.getRpcServer().getFileInfo(testdir + "/f2");
long f2Id=status.getFileId();
// Resume the listing with f2's id as the cookie: only what follows f2.
xdr_req=new XDR();
handle=new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(f2Id);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(100);
response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
dirents=response.getDirList().getEntries();
assertTrue(dirents.size() == 1);
Entry3 entry=dirents.get(0);
assertTrue(entry.getName().equals("f3"));
// Delete the cookie entry and reissue: the server cannot locate the
// cookie anymore and returns the remaining entries.
hdfs.delete(new Path(testdir + "/f2"),false);
response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
dirents=response.getDirList().getEntries();
assertTrue(dirents.size() == 2);
}
BranchVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=1000) public void testIdempotent(){
// Table of every NFSv3 procedure paired with its expected idempotency
// flag: 1 = idempotent (safe to retry), 0 = non-idempotent.
Object[][] procedures={{Nfs3Constant.NFSPROC3.NULL,1},{Nfs3Constant.NFSPROC3.GETATTR,1},{Nfs3Constant.NFSPROC3.SETATTR,1},{Nfs3Constant.NFSPROC3.LOOKUP,1},{Nfs3Constant.NFSPROC3.ACCESS,1},{Nfs3Constant.NFSPROC3.READLINK,1},{Nfs3Constant.NFSPROC3.READ,1},{Nfs3Constant.NFSPROC3.WRITE,1},{Nfs3Constant.NFSPROC3.CREATE,0},{Nfs3Constant.NFSPROC3.MKDIR,0},{Nfs3Constant.NFSPROC3.SYMLINK,0},{Nfs3Constant.NFSPROC3.MKNOD,0},{Nfs3Constant.NFSPROC3.REMOVE,0},{Nfs3Constant.NFSPROC3.RMDIR,0},{Nfs3Constant.NFSPROC3.RENAME,0},{Nfs3Constant.NFSPROC3.LINK,0},{Nfs3Constant.NFSPROC3.READDIR,1},{Nfs3Constant.NFSPROC3.READDIRPLUS,1},{Nfs3Constant.NFSPROC3.FSSTAT,1},{Nfs3Constant.NFSPROC3.FSINFO,1},{Nfs3Constant.NFSPROC3.PATHCONF,1},{Nfs3Constant.NFSPROC3.COMMIT,1}};
// Every procedure's isIdempotent() must agree with the table above.
for ( Object[] entry : procedures) {
Nfs3Constant.NFSPROC3 proc=(Nfs3Constant.NFSPROC3)entry[0];
boolean expectedIdempotent=Integer.valueOf(1).equals(entry[1]);
if (expectedIdempotent) {
Assert.assertTrue(("Procedure " + proc + " should be idempotent"),proc.isIdempotent());
}
else {
Assert.assertFalse(("Procedure " + proc + " should be non-idempotent"),proc.isIdempotent());
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Every deprecated NFS configuration key must still be honored by being
 * mapped onto its replacement NfsConfigKeys / Nfs3Constant key.
 * (Assertions converted from assertTrue(x == n) / assertTrue(s.equals(t))
 * to assertEquals/assertFalse for informative failure messages.)
 */
@Test public void testDeprecatedKeys(){
NfsConfiguration conf=new NfsConfiguration();
conf.setInt("nfs3.server.port",998);
assertEquals(998,conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,0));
conf.setInt("nfs3.mountd.port",999);
assertEquals(999,conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,0));
conf.set("dfs.nfs.exports.allowed.hosts","host1");
assertEquals("host1",conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY));
conf.setInt("dfs.nfs.exports.cache.expirytime.millis",1000);
assertEquals(1000,conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,0));
conf.setInt("hadoop.nfs.userupdate.milly",10);
assertEquals(10,conf.getInt(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,0));
conf.set("dfs.nfs3.dump.dir","/nfs/tmp");
assertEquals("/nfs/tmp",conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY));
conf.setBoolean("dfs.nfs3.enableDump",false);
// default=true proves the deprecated key (not the default) supplied false.
assertFalse(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY,true));
conf.setInt("dfs.nfs3.max.open.files",500);
assertEquals(500,conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,0));
conf.setInt("dfs.nfs3.stream.timeout",6000);
assertEquals(6000,conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY,0));
conf.set("dfs.nfs3.export.point","/dir1");
assertEquals("/dir1",conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Unit-tests the state machine of {@code OpenFileCtx.checkCommit()}:
 * inactive-context handling, sync-vs-wait decisions relative to the flushed
 * position, and bookkeeping of the pending-commits map.
 */
@Test public void testCheckCommit() throws IOException {
    DFSClient dfsClient=Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr=new Nfs3FileAttributes();
    HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long)0);
    OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
    COMMIT_STATUS ret;
    // Inactive context with no pending writes -> INACTIVE_CTX.
    ctx.setActiveStatusForTest(false);
    Channel ch=Mockito.mock(Channel.class);
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX,ret);
    // Inactive context but with a pending write -> INACTIVE_WITH_PENDING_WRITE.
    ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE,ret);
    // Active context; flushed position is 10.
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn((long)10);
    // Commit offset <= flushed position: sync then finish.
    COMMIT_STATUS status=ctx.checkCommitInternal(5,null,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
    ret=ctx.checkCommit(dfsClient,5,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
    status=ctx.checkCommitInternal(10,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
    ret=ctx.checkCommit(dfsClient,10,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
    ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest();
    Assert.assertEquals(0,commits.size());
    // Commit offset beyond flushed position: queued as pending commit.
    ret=ctx.checkCommit(dfsClient,11,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
    Assert.assertEquals(1,commits.size());
    // Raw map: firstKey() returns Object, so cast explicitly before the
    // primitive assignment (was an unchecked Object-to-long assignment).
    long key=(Long)commits.firstKey();
    Assert.assertEquals(11,key);
    commits.remove(Long.valueOf(11));
    // Commit offset 0 means "commit everything": with a pending write
    // covering [5,10] it must wait, keyed by the write's end offset minus 1.
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
    Assert.assertEquals(1,commits.size());
    key=(Long)commits.firstKey();
    Assert.assertEquals(9,key);
    // With no pending writes, "commit everything" finishes immediately.
    ctx.getPendingWritesForTest().remove(new OffsetRange(5,10));
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Tests {@code OpenFileCtx.alterWriteRequest()}: when the leading portion of
 * a write request has already been written, the request's buffer must be
 * advanced so only the trailing, unwritten bytes remain visible.
 */
@Test public void testAlterWriteRequest() throws IOException {
    final int len=20;
    byte[] data=new byte[len];
    ByteBuffer buffer=ByteBuffer.wrap(data);
    // Fill the buffer so byte i holds value i; this lets the helper verify
    // content purely from absolute offsets.
    for (int i=0; i < len; i++) {
        buffer.put((byte)i);
    }
    buffer.flip();
    int originalCount=buffer.array().length;
    WRITE3Request request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
    // Unaltered request: the WriteCtx still sees the full backing array.
    WriteCtx writeCtx1=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),WriteCtx.INVALID_ORIGINAL_COUNT,request.getStableHow(),request.getData(),null,1,false,WriteCtx.DataState.NO_DUMP);
    Assert.assertEquals(originalCount,writeCtx1.getData().array().length);
    // Trim the first 12 bytes: 8 bytes (values 12..19) remain visible.
    OpenFileCtx.alterWriteRequest(request,12);
    WriteCtx writeCtx2=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
    assertAlteredRange(writeCtx2,12,8);
    // Trim one leading byte: 19 bytes (values 1..19) remain visible.
    buffer.position(0);
    request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
    OpenFileCtx.alterWriteRequest(request,1);
    WriteCtx writeCtx3=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
    assertAlteredRange(writeCtx3,1,19);
    // Trim all but the final byte: only value 19 remains visible.
    buffer.position(0);
    request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
    OpenFileCtx.alterWriteRequest(request,19);
    WriteCtx writeCtx4=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
    assertAlteredRange(writeCtx4,19,1);
}
/**
 * Asserts that the WriteCtx's data window starts at {@code expPos}, spans
 * {@code expLen} bytes, and that every visible byte equals its absolute
 * offset (the test buffer was filled with value == index).
 */
private static void assertAlteredRange(WriteCtx writeCtx,int expPos,int expLen){
    ByteBuffer appendedData=writeCtx.getData();
    int position=appendedData.position();
    Assert.assertEquals(expPos,position);
    Assert.assertEquals(expLen,appendedData.limit() - position);
    for (int i=0; i < expLen; i++) {
        Assert.assertEquals((byte)(expPos + i),appendedData.get(position + i));
    }
}
InternalCallVerifier BooleanVerifier
/**
 * Sends three out-of-order UNSTABLE writes (highest offset first) through
 * the NFS3 gateway and verifies that, after the writes drain, reading back
 * the middle block returns the buffer that was written at that offset —
 * i.e. the write manager reorders pending writes correctly.
 */
@Test public void testOOOWrites() throws IOException, InterruptedException {
    NfsConfiguration config=new NfsConfiguration();
    MiniDFSCluster cluster=null;
    RpcProgramNfs3 nfsd;
    final int bufSize=32;
    final int numOOO=3;
    SecurityHandler securityHandler=Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    // Allow the current user to proxy from any host/group.
    String currentUser=System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser),"*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser),"*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Port 0 = pick free ports so parallel test runs don't collide.
    config.setInt("nfs3.mountd.port",0);
    config.setInt("nfs3.server.port",0);
    try {
        cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3=new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd=(RpcProgramNfs3)nfs3.getRpcProgram();
        DFSClient dfsClient=new DFSClient(NameNode.getAddress(config),config);
        HdfsFileStatus status=dfsClient.getFileInfo("/");
        FileHandle rootHandle=new FileHandle(status.getFileId());
        CREATE3Request createReq=new CREATE3Request(rootHandle,"out-of-order-write" + System.currentTimeMillis(),Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
        XDR createXdr=new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp=nfsd.create(createXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
        FileHandle handle=createRsp.getObjHandle();
        // Buffer i is filled with the byte value i so content identifies origin.
        byte[][] oooBuf=new byte[numOOO][bufSize];
        for (int i=0; i < numOOO; i++) {
            Arrays.fill(oooBuf[i],(byte)i);
        }
        // Issue writes with descending offsets: buf 0 at offset 64, buf 1 at
        // 32, buf 2 at 0 — deliberately out of order.
        for (int i=0; i < numOOO; i++) {
            final long offset=(numOOO - 1 - i) * bufSize;
            WRITE3Request writeReq=new WRITE3Request(handle,offset,bufSize,WriteStableHow.UNSTABLE,ByteBuffer.wrap(oooBuf[i]));
            XDR writeXdr=new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
        }
        waitWrite(nfsd,handle,60000);
        // Read back the middle block: it must be the buffer written at
        // offset bufSize, i.e. oooBuf[1].
        READ3Request readReq=new READ3Request(handle,bufSize,bufSize);
        XDR readXdr=new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp=nfsd.read(readXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertArrayEquals(oooBuf[1],readRsp.getData().array());
    }
    finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Writes one file with DATA_SYNC and one with FILE_SYNC stability through
 * the NFS3 gateway, then reads each back and verifies the content (and the
 * FILE_SYNC file's length) round-trips intact.
 */
@Test public void testWriteStableHow() throws IOException, InterruptedException {
    NfsConfiguration config=new NfsConfiguration();
    DFSClient client=null;
    MiniDFSCluster cluster=null;
    RpcProgramNfs3 nfsd;
    SecurityHandler securityHandler=Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    // Allow the current user to proxy from any host/group.
    String currentUser=System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser),"*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser),"*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    try {
        cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        client=new DFSClient(NameNode.getAddress(config),config);
        // Port 0 = pick free ports so parallel test runs don't collide.
        config.setInt("nfs3.mountd.port",0);
        config.setInt("nfs3.server.port",0);
        Nfs3 nfs3=new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd=(RpcProgramNfs3)nfs3.getRpcProgram();
        HdfsFileStatus status=client.getFileInfo("/");
        FileHandle rootHandle=new FileHandle(status.getFileId());
        // --- file1: DATA_SYNC write ---
        CREATE3Request createReq=new CREATE3Request(rootHandle,"file1",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
        XDR createXdr=new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp=nfsd.create(createXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
        FileHandle handle=createRsp.getObjHandle();
        byte[] buffer=new byte[10];
        for (int i=0; i < 10; i++) {
            buffer[i]=(byte)i;
        }
        WRITE3Request writeReq=new WRITE3Request(handle,0,10,WriteStableHow.DATA_SYNC,ByteBuffer.wrap(buffer));
        XDR writeXdr=new XDR();
        writeReq.serialize(writeXdr);
        nfsd.write(writeXdr.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
        waitWrite(nfsd,handle,60000);
        READ3Request readReq=new READ3Request(handle,0,10);
        XDR readXdr=new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp=nfsd.read(readXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
        assertArrayEquals(buffer,readRsp.getData().array());
        // --- file2: FILE_SYNC write ---
        CREATE3Request createReq2=new CREATE3Request(rootHandle,"file2",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
        XDR createXdr2=new XDR();
        createReq2.serialize(createXdr2);
        CREATE3Response createRsp2=nfsd.create(createXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
        FileHandle handle2=createRsp2.getObjHandle();
        WRITE3Request writeReq2=new WRITE3Request(handle2,0,10,WriteStableHow.FILE_SYNC,ByteBuffer.wrap(buffer));
        XDR writeXdr2=new XDR();
        writeReq2.serialize(writeXdr2);
        nfsd.write(writeXdr2.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
        waitWrite(nfsd,handle2,60000);
        READ3Request readReq2=new READ3Request(handle2,0,10);
        XDR readXdr2=new XDR();
        readReq2.serialize(readXdr2);
        READ3Response readRsp2=nfsd.read(readXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
        assertArrayEquals(buffer,readRsp2.getData().array());
        // FILE_SYNC means the file metadata is durable too: length visible
        // through a plain DFS client must already be 10.
        status=client.getFileInfo("/file2");
        assertEquals(10,status.getLen());
    }
    finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests commit checks issued on the read path ({@code fromRead == true}):
 * unlike writer-driven commits, read-driven commits must never enqueue a
 * pending commit; instead the WriteManager maps the statuses to NFS3 codes
 * (OK / ERR_IO / ERR_JUKEBOX) for the reader.
 */
@Test public void testCheckCommitFromRead() throws IOException {
    DFSClient dfsClient=Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr=new Nfs3FileAttributes();
    HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long)0);
    NfsConfiguration config=new NfsConfiguration();
    OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(config));
    FileHandle h=new FileHandle(1);
    COMMIT_STATUS ret;
    WriteManager wm=new WriteManager(new IdUserGroup(config),config,false);
    assertTrue(wm.addOpenFileStream(h,ctx));
    // Inactive context, no pending writes -> INACTIVE_CTX, reader sees OK.
    ctx.setActiveStatusForTest(false);
    Channel ch=Mockito.mock(Channel.class);
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX,ret);
    assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
    // Inactive context with a pending write -> reader gets an I/O error.
    ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE,ret);
    assertEquals(Nfs3Status.NFS3ERR_IO,wm.commitBeforeRead(dfsClient,h,0));
    // Active context; flushed position is 10.
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn((long)10);
    COMMIT_STATUS status=ctx.checkCommitInternal(5,ch,1,attr,false);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
    ret=ctx.checkCommit(dfsClient,5,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
    assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,5));
    status=ctx.checkCommitInternal(10,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
    ret=ctx.checkCommit(dfsClient,10,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
    assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,10));
    ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest();
    assertEquals(0,commits.size());
    // Commit offset beyond the flushed position: the reader must wait
    // (JUKEBOX) but, unlike the write path, nothing is queued.
    ret=ctx.checkCommit(dfsClient,11,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
    assertEquals(0,commits.size());
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,11));
    // Commit-everything with a pending write also waits without queueing.
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
    assertEquals(0,commits.size());
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,0));
    // No pending writes left: commit-everything finishes, reader sees OK.
    ctx.getPendingWritesForTest().remove(new OffsetRange(5,10));
    ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
    assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
}
InternalCallVerifier BooleanVerifier
/**
 * Tests AIX compatibility mode: a commit whose offset lies beyond the
 * flushed position reports COMMIT_FINISHED (instead of waiting), while a
 * commit at or below the flushed position still triggers a sync.
 */
@Test public void testCheckCommitAixCompatMode() throws IOException {
    DFSClient dfsClient=Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr=new Nfs3FileAttributes();
    HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
    // Last constructor arg enables AIX compatibility mode.
    OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()),true);
    // Flushed position (2) < commit offset (5): AIX mode finishes anyway.
    Mockito.when(fos.getPos()).thenReturn((long)2);
    COMMIT_STATUS status=ctx.checkCommitInternal(5,null,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED,status);
    // Flushed position (10) >= commit offset (5): normal sync behavior.
    Mockito.when(fos.getPos()).thenReturn((long)10);
    status=ctx.checkCommitInternal(5,null,1,attr,false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
}
BooleanVerifier
/**
 * Every method on the NameNode RPC interfaces must declare its retry
 * semantics by carrying either @Idempotent or @AtMostOnce.
 */
@Test public void checkAnnotations(){
    for ( Method method : NamenodeProtocols.class.getMethods()) {
        boolean annotated=method.isAnnotationPresent(Idempotent.class) || method.isAnnotationPresent(AtMostOnce.class);
        Assert.assertTrue("Idempotent or AtMostOnce annotation is not present " + method,annotated);
    }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to make sure NameNode.Feature supports previous features: the first
 * NameNode-specific feature must sit immediately below the last common
 * layout feature and still support the last non-reserved common feature.
 */
@Test public void testNameNodeFeature(){
    final LayoutFeature firstNnFeature=NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
    final int firstNnLayoutVersion=firstNnFeature.getInfo().getLayoutVersion();
    assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,firstNnLayoutVersion));
    assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,firstNnLayoutVersion);
}
BooleanVerifier
/**
 * Test to make sure 0.20.203 supports delegation token.
 */
@Test public void testRelease203(){
    final int layoutVersion203=Feature.RESERVED_REL20_203.getInfo().getLayoutVersion();
    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,layoutVersion203));
}
BooleanVerifier
/**
 * Test to make sure 0.20.204 supports delegation token.
 */
@Test public void testRelease204(){
    final int layoutVersion204=Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,layoutVersion204));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to make sure DataNode.Feature supports previous features: the first
 * DataNode-specific feature must sit immediately below the last common
 * layout feature and still support the last non-reserved common feature.
 */
@Test public void testDataNodeFeature(){
    final LayoutFeature firstDnFeature=DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
    final int firstDnLayoutVersion=firstDnFeature.getInfo().getLayoutVersion();
    assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,firstDnLayoutVersion));
    assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,firstDnLayoutVersion);
}
BooleanVerifier
/**
 * Test to make sure release 1.2.0 supports CONCAT.
 */
@Test public void testRelease1_2_0(){
    final int layoutVersion120=Feature.RESERVED_REL1_2_0.getInfo().getLayoutVersion();
    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.CONCAT,layoutVersion120));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a NameNode backed by a quorum journal, clones its image directory
 * into a second NameNode, and verifies that once the second NN takes over
 * the journal (fencing the first), the first NN can no longer write edits.
 */
@Test(timeout=30000) public void testNewNamenodeTakesOverWriter() throws Exception {
    File nn1Dir=new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
    File nn2Dir=new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nn1Dir.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
    // Format nn1's storage, then shut down so the image can be copied.
    MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
    cluster.shutdown();
    try {
        // Clone nn1's image directory for the second NameNode.
        FileUtil.fullyDelete(nn2Dir);
        FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf);
        // Restart nn1 (no format) and write an edit through the journal.
        cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
        cluster.getFileSystem().mkdirs(TEST_PATH);
        // Start nn2 on the cloned image; it grabs a newer journal epoch,
        // fencing nn1, and must see nn1's earlier edit.
        Configuration conf2=new Configuration();
        conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nn2Dir.getAbsolutePath());
        conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
        MiniDFSCluster cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
        try {
            assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
        }
        finally {
            cluster2.shutdown();
        }
        // The fenced nn1 must fail to log any further edits.
        try {
            cluster.getFileSystem().mkdirs(new Path("/x"));
            fail("Did not abort trying to write to a fenced NN");
        }
        catch ( RemoteException re) {
            GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage",re);
        }
    }
    finally {
        // Bug fix: the finally block was empty, leaking the restarted
        // cluster (and its NameNode threads) after the test.
        cluster.shutdown();
    }
}
InternalCallVerifier BooleanVerifier
/**
 * Logs edits to a quorum journal, restarts the NameNode twice, and checks
 * that the namespace changes survive each restart — i.e. edits were durably
 * written to the JournalNodes and replayed on startup.
 */
@Test(timeout=30000) public void testLogAndRestart() throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).build();
try {
// First edit goes through the "myjournal" quorum journal.
cluster.getFileSystem().mkdirs(TEST_PATH);
cluster.restartNameNode();
// The directory must have been replayed from the journal.
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
cluster.getFileSystem().mkdirs(TEST_PATH_2);
cluster.restartNameNode();
// Both edits must survive a second restart.
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
assertTrue(cluster.getFileSystem().exists(TEST_PATH_2));
}
finally {
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises epoch allocation from a single thread: each new
 * QuorumJournalManager must acquire a strictly increasing epoch, including
 * when loggers inject faults and epoch creation has to be retried.
 */
@Test public void testSingleThreaded() throws IOException {
Configuration conf=new Configuration();
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
URI uri=cluster.getQuorumJournalURI(JID);
QuorumJournalManager qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
try {
qjm.format(FAKE_NSINFO);
}
finally {
qjm.close();
}
try {
// With healthy loggers, each fresh QJM gets the next sequential epoch.
for (int i=0; i < 5; i++) {
qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
try {
qjm.createNewUniqueEpoch();
assertEquals(i + 1,qjm.getLoggerSetForTests().getEpoch());
}
finally {
qjm.close();
}
}
// 5 successful epochs were allocated above.
long prevEpoch=5;
for (int i=0; i < 20; i++) {
long newEpoch=-1;
while (true) {
qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO,new FaultyLoggerFactory());
try {
qjm.createNewUniqueEpoch();
newEpoch=qjm.getLoggerSetForTests().getEpoch();
break;
}
catch ( IOException ioe) {
// Expected: injected faults may prevent a quorum. Retry with a
// fresh QJM until epoch creation succeeds.
}
finally {
qjm.close();
}
}
LOG.info("Created epoch " + newEpoch);
// Even through failures, epochs must only move forward.
assertTrue("New epoch " + newEpoch + " should be greater than previous "+ prevEpoch,newEpoch > prevEpoch);
prevEpoch=newEpoch;
}
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that, if the remote node gets unsynchronized (eg some edits were
 * missed or the node rebooted), the client stops sending edits until
 * the next roll. Test for HDFS-3726.
 */
@Test public void testStopSendingEditsWhenOutOfSync() throws Exception {
// Make the first journal() RPC fail, driving the channel out of sync.
Mockito.doThrow(new IOException("injected error")).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
try {
ch.sendEdits(1L,1L,1,FAKE_DATA).get();
fail("Injected JOOSE did not cause sendEdits() to throw");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("injected",ee);
}
Mockito.verify(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
assertTrue(ch.isOutOfSync());
try {
// While out of sync, further edits must be refused locally...
ch.sendEdits(1L,2L,1,FAKE_DATA).get();
fail("sendEdits() should throw until next roll");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("disabled until next roll",ee.getCause());
}
// ...and never reach the remote proxy; only a heartbeat goes through.
Mockito.verify(mockProxy,Mockito.never()).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(2L),Mockito.eq(1),Mockito.same(FAKE_DATA));
Mockito.verify(mockProxy).heartbeat(Mockito.any());
// Rolling to a new segment re-enables the channel for sending edits.
ch.startLogSegment(3L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
assertFalse(ch.isOutOfSync());
ch.sendEdits(3L,3L,1,FAKE_DATA).get();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 */
@Test public void testRecoverAfterDoubleFailures() throws Exception {
final long MAX_IPC_NUMBER=determineMaxIpcNumber();
// Sweep every (failA, failB) pair: logger 0 drops its failA-th IPC and
// logger 1 drops its failB-th IPC.
for (int failA=1; failA <= MAX_IPC_NUMBER; failA++) {
for (int failB=1; failB <= MAX_IPC_NUMBER; failB++) {
String injectionStr="(" + failA + ", "+ failB+ ")";
LOG.info("\n\n-------------------------------------------\n" + "Beginning test, failing at " + injectionStr + "\n"+ "-------------------------------------------\n\n");
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
QuorumJournalManager qjm=null;
try {
qjm=createInjectableQJM(cluster);
qjm.format(FAKE_NSINFO);
List loggers=qjm.getLoggerSetForTests().getLoggersForTests();
// Schedule the two single-RPC failures on two of the three loggers.
failIpcNumber(loggers.get(0),failA);
failIpcNumber(loggers.get(1),failB);
int lastAckedTxn=doWorkload(cluster,qjm);
if (lastAckedTxn < 6) {
LOG.info("Failed after injecting failures at " + injectionStr + ". This is expected since we injected a failure in the "+ "majority.");
}
qjm.close();
qjm=null;
// A fresh writer must recover at least through the last ACKed txn
// and then be able to continue writing a new segment.
qjm=createInjectableQJM(cluster);
long lastRecoveredTxn=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(lastRecoveredTxn >= lastAckedTxn);
writeSegment(cluster,qjm,lastRecoveredTxn + 1,3,true);
}
catch ( Throwable t) {
// Re-wrap so the failing injection point shows up in the report.
throw new RuntimeException("Test failed with injection: " + injectionStr,t);
}
finally {
cluster.shutdown();
cluster=null;
IOUtils.closeStream(qjm);
qjm=null;
}
}
}
}
APIUtilityVerifier IterativeVerifier BooleanVerifier
/**
 * Test case in which three JournalNodes randomly flip flop between
 * up and down states every time they get an RPC.
 * The writer keeps track of the latest ACKed edit, and on every
 * recovery operation, ensures that it recovers at least to that
 * point or higher. Since at any given point, a majority of JNs
 * may be injecting faults, any writer operation is allowed to fail,
 * so long as the exception message indicates it failed due to injected
 * faults.
 * Given a random seed, the test should be entirely deterministic.
 */
@Test public void testRandomized() throws Exception {
long seed;
// An explicit seed (system property) makes a failing run reproducible.
Long userSpecifiedSeed=Long.getLong(RAND_SEED_PROPERTY);
if (userSpecifiedSeed != null) {
LOG.info("Using seed specified in system property");
seed=userSpecifiedSeed;
// Extra RPC logging when replaying a known-bad seed.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
}
else {
seed=new Random().nextLong();
}
LOG.info("Random seed: " + seed);
Random r=new Random(seed);
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
// Format through a non-faulty path before the randomized writers start.
QuorumJournalManager qjmForInitialFormat=createInjectableQJM(cluster);
qjmForInitialFormat.format(FAKE_NSINFO);
qjmForInitialFormat.close();
try {
long txid=0;
long lastAcked=0;
for (int i=0; i < NUM_WRITER_ITERS; i++) {
LOG.info("Starting writer " + i + "\n-------------------");
QuorumJournalManager qjm=createRandomFaultyQJM(cluster,r);
try {
long recovered;
try {
recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
}
catch ( Throwable t) {
// Recovery may fail due to injected faults; verify the failure
// is fault-induced and try again with the next writer.
LOG.info("Failed recovery",t);
checkException(t);
continue;
}
// Core invariant: recovery must never lose an ACKed transaction.
assertTrue("Recovered only up to txnid " + recovered + " but had gotten an ack for "+ lastAcked,recovered >= lastAcked);
txid=recovered + 1;
// Occasionally purge old logs to exercise that path too.
if (txid > 100 && i % 10 == 1) {
qjm.purgeLogsOlderThan(txid - 100);
}
Holder thrown=new Holder(null);
for (int j=0; j < SEGMENTS_PER_WRITER; j++) {
lastAcked=writeSegmentUntilCrash(cluster,qjm,txid,4,thrown);
if (thrown.held != null) {
// Writes too may fail, but only due to injected faults.
LOG.info("Failed write",thrown.held);
checkException(thrown.held);
break;
}
txid+=4;
}
}
finally {
qjm.close();
}
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Writes five single-transaction segments, plants extra files in the
 * journal's current/ and paxos/ directories, then verifies that
 * purgeLogsOlderThan(3) removes only segments and paxos entries below
 * txid 3 — including stray in-progress/.empty artifacts from older txids.
 */
@Test public void testPurgeLogs() throws Exception {
for (int txid=1; txid <= 5; txid++) {
writeSegment(cluster,qjm,txid,1,true);
}
File curDir=cluster.getCurrentDir(0,JID);
// All five finalized segments exist before the purge.
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getFinalizedEditsFileName(1,1),NNStorage.getFinalizedEditsFileName(2,2),NNStorage.getFinalizedEditsFileName(3,3),NNStorage.getFinalizedEditsFileName(4,4),NNStorage.getFinalizedEditsFileName(5,5));
File paxosDir=new File(curDir,"paxos");
GenericTestUtils.assertExists(paxosDir);
// Plant fake paxos recovery metadata for txids 1 and 3.
assertTrue(new File(paxosDir,"1").createNewFile());
assertTrue(new File(paxosDir,"3").createNewFile());
GenericTestUtils.assertGlobEquals(paxosDir,"\\d+","1","3");
// Plant stray in-progress artifacts that should also be purged.
assertTrue(new File(curDir,"edits_inprogress_0000000000000000001.epoch=140").createNewFile());
assertTrue(new File(curDir,"edits_inprogress_0000000000000000002.empty").createNewFile());
qjm.purgeLogsOlderThan(3);
// Purge is asynchronous per-logger; wait for the RPCs to complete.
waitForAllPendingCalls(qjm.getLoggerSetForTests());
// Only txid >= 3 survives, in both the edits dir and the paxos dir.
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getFinalizedEditsFileName(3,3),NNStorage.getFinalizedEditsFileName(4,4),NNStorage.getFinalizedEditsFileName(5,5));
GenericTestUtils.assertGlobEquals(paxosDir,"\\d+","3");
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Regression test for HDFS-3891: selectInputStreams should throw
 * an exception when a majority of journalnodes have crashed.
 */
@Test public void testSelectInputStreamsMajorityDown() throws Exception {
// Take down every JournalNode, then ask for input streams.
cluster.shutdown();
List streams=Lists.newArrayList();
try {
qjm.selectInputStreams(streams,0,false);
fail("Did not throw IOE");
}
catch ( QuorumException ioe) {
GenericTestUtils.assertExceptionContains("Got too many exceptions",ioe);
// No partial results may leak out when the call fails.
assertTrue(streams.isEmpty());
}
}
InternalCallVerifier BooleanVerifier
/**
 * A journal under a fresh journal id reports no data until format() runs,
 * and reports data afterwards.
 */
@Test public void testFormat() throws Exception {
    QuorumJournalManager manager=closeLater(new QuorumJournalManager(conf,cluster.getQuorumJournalURI("testFormat-jid"),FAKE_NSINFO));
    assertFalse(manager.hasSomeData());
    manager.format(FAKE_NSINFO);
    assertTrue(manager.hasSomeData());
}
APIUtilityVerifier BooleanVerifier
/**
 * Injects failures around the accept-recovery / persist-paxos-data steps
 * and verifies that once the faults are cleared, a fresh QJM can still
 * complete recovery (through at least txid 4).
 */
@Test(timeout=20000) public void testCrashBetweenSyncLogAndPersistPaxosData() throws Exception {
JournalFaultInjector faultInjector=JournalFaultInjector.instance=Mockito.mock(JournalFaultInjector.class);
// Shared fixture helper; presumably leaves loggers holding txns 3/4/5 —
// see its definition for the exact layout.
setupLoggers345();
qjm=createSpyingQJM();
spies=qjm.getLoggerSetForTests().getLoggersForTests();
// First attempt: JN 2 down, and logger 1's acceptRecovery fails.
cluster.getJournalNode(2).stopAndJoin(0);
injectIOE().when(spies.get(1)).acceptRecovery(Mockito.any(),Mockito.any());
tryRecoveryExpectingFailure();
cluster.restartJournalNode(2);
// Second attempt: logger 0's prepareRecovery fails, and the fault
// injector aborts just before paxos data would be persisted.
qjm=createSpyingQJM();
spies=qjm.getLoggerSetForTests().getLoggersForTests();
injectIOE().when(spies.get(0)).prepareRecovery(Mockito.eq(1L));
Mockito.doThrow(new IOException("Injected")).when(faultInjector).beforePersistPaxosData();
tryRecoveryExpectingFailure();
Mockito.reset(faultInjector);
// Final attempt: faults cleared (JN 2 down again); recovery must now
// succeed and reach at least txid 4.
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
try {
long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(recovered >= 4);
}
finally {
qjm.close();
}
}
InternalCallVerifier BooleanVerifier
/**
 * The QuorumOutputStream status report must be plain text, i.e. contain no
 * markup characters.
 */
@Test public void testQuorumOutputStreamReport() throws Exception {
    // Stub all three spy loggers to accept the startLogSegment call.
    for (int i=0; i < 3; i++) {
        futureReturns(null).when(spyLoggers.get(i)).startLogSegment(Mockito.anyLong(),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
    }
    QuorumOutputStream stream=(QuorumOutputStream)qjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    String report=stream.generateReport();
    Assert.assertFalse("Report should be plain text",report.contains("<"));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the journal's epoch-promise handling: newEpoch() must advance the
 * promised epoch, reject a duplicate promise, and cause writer calls
 * stamped with an older epoch to be rejected.
 */
@Test(timeout=10000) public void testEpochHandling() throws Exception {
    assertEquals(0,journal.getLastPromisedEpoch());
    NewEpochResponseProto newEpoch=journal.newEpoch(FAKE_NSINFO,1);
    // No segments written yet, so no last-segment txid in the response.
    assertFalse(newEpoch.hasLastSegmentTxId());
    assertEquals(1,journal.getLastPromisedEpoch());
    // Bug fix: capture the response of the second promise instead of
    // re-asserting on the stale response from epoch 1.
    newEpoch=journal.newEpoch(FAKE_NSINFO,3);
    assertFalse(newEpoch.hasLastSegmentTxId());
    assertEquals(3,journal.getLastPromisedEpoch());
    // A promise for the same epoch twice must be refused.
    try {
        journal.newEpoch(FAKE_NSINFO,3);
        fail("Should have failed to promise same epoch twice");
    }
    catch ( IOException ioe) {
        GenericTestUtils.assertExceptionContains("Proposed epoch 3 <= last promise 3",ioe);
    }
    // Writer calls carrying the old epoch (1) must be rejected.
    try {
        journal.startLogSegment(makeRI(1),12345L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        fail("Should have rejected call from prior epoch");
    }
    catch ( IOException ioe) {
        GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
    }
    try {
        journal.journal(makeRI(1),12345L,100L,0,new byte[0]);
        fail("Should have rejected call from prior epoch");
    }
    catch ( IOException ioe) {
        GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
    }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Reformatting the journal under a different namespace must reset the
 * cached promised-epoch and writer-epoch values back to zero.
 */
@Test(timeout=10000) public void testFormatResetsCachedValues() throws Exception {
    // Establish non-zero cached epochs by promising an epoch and opening a
    // segment under it.
    journal.newEpoch(FAKE_NSINFO,12345L);
    RequestInfo writerReq=new RequestInfo(JID,12345L,1L,0L);
    journal.startLogSegment(writerReq,1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    assertEquals(12345L,journal.getLastPromisedEpoch());
    assertEquals(12345L,journal.getLastWriterEpoch());
    assertTrue(journal.isFormatted());
    // Close and reformat under a different namespace.
    journal.close();
    journal.format(FAKE_NSINFO_2);
    // Both cached epochs are back to zero; the journal is still formatted.
    assertEquals(0,journal.getLastPromisedEpoch());
    assertEquals(0,journal.getLastWriterEpoch());
    assertTrue(journal.isFormatted());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test whether JNs can correctly handle editlog that cannot be decoded:
 * writes garbage transactions into a segment and checks that the segment
 * info still reports the correct txid range, both while in progress and
 * after finalization.
 */
@Test public void testScanEditLog() throws Exception {
    // Open a segment with an older layout version so the scanner path runs.
    journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
    final int numTxns=5;
    // Use the constant instead of a duplicated literal 5.
    byte[] ops=QJMTestUtil.createGabageTxns(1,numTxns);
    journal.journal(makeRI(2),1,1,numTxns,ops);
    // In-progress segment: range [1, numTxns] despite undecodable payload.
    SegmentStateProto segmentState=journal.getSegmentInfo(1);
    assertTrue(segmentState.getIsInProgress());
    assertEquals(numTxns,segmentState.getEndTxId());
    assertEquals(1,segmentState.getStartTxId());
    // After finalization the same range is reported, now as finalized.
    journal.finalizeLogSegment(makeRI(3),1,numTxns);
    segmentState=journal.getSegmentInfo(1);
    assertFalse(segmentState.getIsInProgress());
    assertEquals(numTxns,segmentState.getEndTxId());
    assertEquals(1,segmentState.getStartTxId());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the JournalNode performs correctly as a Paxos
 * Acceptor process.
 */
@Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception {
// Paxos calls before any epoch has been negotiated must be rejected.
try {
ch.prepareRecovery(1L).get();
fail("Did not throw IllegalState when trying to run paxos without an epoch");
}
catch ( ExecutionException ise) {
GenericTestUtils.assertExceptionContains("bad epoch",ise);
}
ch.newEpoch(1).get();
ch.setEpoch(1);
// With an epoch but no data, prepare returns an empty response.
PrepareRecoveryResponseProto prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertFalse(prep.hasSegmentState());
// Write one txn; prepare now reports segment state but no accepted value.
ch.startLogSegment(1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1L,1,QJMTestUtil.createTxnData(1,1)).get();
prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertTrue(prep.hasSegmentState());
// Accept the proposed recovery value in epoch 1.
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
// NOTE(review): unlike newEpoch(1) above, this result is not awaited via
// .get() — confirm the missing .get() is intentional.
ch.newEpoch(2);
ch.setEpoch(2);
// A later prepare must report the value accepted in epoch 1.
prep=ch.prepareRecovery(1L).get();
assertEquals(1L,prep.getAcceptedInEpoch());
assertEquals(1L,prep.getSegmentState().getEndTxId());
// Regressing the channel to epoch 1 makes both paxos calls stale; the
// acceptor must reject them against its promise of epoch 2.
ch.setEpoch(1);
try {
ch.prepareRecovery(1L).get();
fail("prepare from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
try {
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
fail("accept from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercise the JournalNode's embedded HTTP server: the JMX servlet must be
 * served, finalized edits must be retrievable via /getJournal, and a request
 * for an unknown segment must yield HTTP 404.
 */
@Test(timeout=100000) public void testHttpServer() throws Exception {
  final String httpRoot = jn.getHttpServerURI();

  // The JMX page should expose the JournalNode's JVM metrics bean.
  final String jmxPage = DFSTestUtil.urlGet(new URL(httpRoot + "/jmx"));
  assertTrue("Bad contents: " + jmxPage,
      jmxPage.contains("Hadoop:service=JournalNode,name=JvmMetrics"));

  // Write and finalize a small three-transaction segment over IPC.
  byte[] EDITS_DATA = QJMTestUtil.createTxnData(1, 3);
  IPCLoggerChannel ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId,
      jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 3, EDITS_DATA).get();
  ch.finalizeLogSegment(1, 3).get();

  // Fetch the segment over HTTP; expect the layout-version header, four
  // zero bytes, then the edit data itself.
  byte[] fetched = DFSTestUtil.urlGetBytes(
      new URL(httpRoot + "/getJournal?segmentTxId=1&jid=" + journalId));
  byte[] expected = Bytes.concat(
      Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),
      new byte[]{0, 0, 0, 0},
      EDITS_DATA);
  assertArrayEquals(expected, fetched);

  // A request for a non-existent segment must produce a 404.
  URL missingSegmentUrl = new URL(
      httpRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
  HttpURLConnection conn = (HttpURLConnection) missingSegmentUrl.openConnection();
  try {
    assertEquals(404, conn.getResponseCode());
  } finally {
    conn.disconnect();
  }
}
BooleanVerifier
/**
 * Verify that the JournalNode refuses to start when its edits directory is
 * misconfigured: a relative path, a path pointing at a regular file, or a
 * path that cannot be created.
 */
@Test(timeout=100000) public void testFailToStartWithBadConfig() throws Exception {
  Configuration badConf = new Configuration();

  // A relative edits directory is rejected outright.
  badConf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path");
  assertJNFailsToStart(badConf, "should be an absolute path");

  // Pointing the edits dir at an existing regular file must also fail.
  File plainFile = new File(TEST_BUILD_DATA, "testjournalnodefile");
  assertTrue(plainFile.createNewFile());
  try {
    badConf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
        plainFile.getAbsolutePath());
    assertJNFailsToStart(badConf, "Not a directory");
  } finally {
    plainFile.delete();
  }

  // Finally, a directory that cannot be created (platform-specific path).
  badConf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist");
  assertJNFailsToStart(badConf, "Can not create directory");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testJournalNodeMXBean() throws Exception {
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
String journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
assertFalse(journalStatus.contains(NAMESERVICE));
final NamespaceInfo FAKE_NSINFO=new NamespaceInfo(12345,"mycluster","my-bp",0L);
jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
Map> jMap=new HashMap>();
Map infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
jCluster=new MiniJournalCluster.Builder(new Configuration()).format(false).numJournalNodes(NUM_JN).build();
jn=jCluster.getJournalNode(0);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
jMap=new HashMap>();
infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the delegation token secret manager only runs when the
 * NN is out of safe mode. This is because the secret manager
 * has to log to the edit log, which should not be written in
 * safe mode. Regression test for HDFS-2579.
 */
@Test public void testDTManagerInSafeMode() throws Exception {
// Create some files so the NN has blocks to account for after restart.
cluster.startDataNodes(config,1,true,StartupOption.REGULAR,null);
FileSystem fs=cluster.getFileSystem();
for (int i=0; i < 5; i++) {
DFSTestUtil.createFile(fs,new Path("/test-" + i),100,(short)1,1L);
}
// Short key-update interval, and a long safe-mode extension so the NN
// stays in safe mode after the restart until we explicitly leave it.
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,500);
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,30000);
cluster.setWaitSafeMode(false);
cluster.restartNameNode();
NameNode nn=cluster.getNameNode();
assertTrue(nn.isInSafeMode());
DelegationTokenSecretManager sm=NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
// While the NN is in safe mode the secret manager must not be running,
// since it would need to write to the edit log.
assertFalse("Secret manager should not run in safe mode",sm.isRunning());
// Leaving safe mode must start it ...
NameNodeAdapter.leaveSafeMode(nn);
assertTrue("Secret manager should start when safe mode is exited",sm.isRunning());
LOG.info("========= entering safemode again");
// ... and manually re-entering safe mode must stop it again.
NameNodeAdapter.enterSafeMode(nn,false);
assertFalse("Secret manager should stop again when safe mode " + "is manually entered",sm.isRunning());
// With no safe-mode extension, a restart should come up out of safe mode
// with the secret manager already running.
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0);
cluster.setWaitSafeMode(true);
cluster.restartNameNode();
nn=cluster.getNameNode();
sm=NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse(nn.isInSafeMode());
assertTrue(sm.isRunning());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercise the delegation token lifecycle against the secret manager:
 * renewal by the wrong renewer is rejected, the token's password becomes
 * unretrievable after the renew interval passes, renewal revives it, and
 * renewal is permanently rejected once the max lifetime has elapsed.
 *
 * Fix: restored the stripped generic type on the token declaration
 * (raw Token -> Token&lt;DelegationTokenIdentifier&gt;).
 */
@Test public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token =
      generateDelegationToken("SomeUser", "JobTracker");
  // Only the designated renewer may renew the token.
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // expected: "FakeRenewer" is not the token's renewer
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // The renew interval has passed, so the password lookup must fail ...
  try {
    dtSecretManager.retrievePassword(identifier);
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // expected: token past its renew interval
  }
  // ... but a renewal revives the token until its max lifetime.
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // expected: max lifetime exceeded; renewal no longer possible
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 *
 * Fix: restored the stripped generic type on the namenode URI collection
 * (raw Collection -> Collection&lt;URI&gt;).
 */
@Test(timeout=60000) public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY;
  String newNodeRack = TestBalancer.RACK2;
  String[] racks = new String[]{TestBalancer.RACK0, TestBalancer.RACK1};
  long[] capacities = new long[]{TestBalancer.CAPACITY, TestBalancer.CAPACITY};
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  // Build the cluster from a copy so that 'conf' can afterwards be
  // rewritten with the client-side failover settings.
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    // Fill the cluster to 30% with one file replicated to every datanode.
    long totalCapacity = TestBalancer.sum(capacities);
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath,
        totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 1);
    // Add one new, empty datanode on a different rack and rebalance.
    cluster.startDataNodes(conf, 1, true, null,
        new String[]{newNodeRack}, new long[]{newNodeCapacity});
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    // In an HA setup there is exactly one logical nameservice URI.
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
        Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Adding a storage to a BlockInfo must succeed, and that storage must then
 * be retrievable at index 0.
 */
@Test public void testAddStorage() throws Exception {
  BlockInfo info = new BlockInfo(3);
  final DatanodeStorageInfo dnStorage =
      DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  Assert.assertTrue(info.addStorage(dnStorage));
  Assert.assertEquals(dnStorage, info.getStorageInfo(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test block-token-based read access end to end: reads succeed with a valid
 * token, fail once the token expires or is for the wrong block or access
 * mode, and streams transparently re-fetch tokens across datanode and
 * namenode restarts.
 *
 * Fix: restored the stripped generic types (raw List -> List&lt;LocatedBlock&gt;,
 * raw Token -> Token&lt;BlockTokenIdentifier&gt;); the enhanced-for loops over
 * LocatedBlock do not compile against a raw List.
 */
@Test public void testRead() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // Very short token lifetime so expiry can be observed quickly.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    createFile(fs, fileToRead);
    // Three independent input streams; their cached tokens are examined later.
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1));
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2));
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3));
    // A DFSClient can be created and closed without issue.
    DFSClient client = null;
    try {
      client = new DFSClient(
          new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
      if (client != null) client.close();
    }
    List<LocatedBlock> locatedBlocks =
        nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0);
    Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
    // A fresh token allows the read; once it expires the same read fails.
    assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
    tryRead(conf, lblock, true);
    while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
    tryRead(conf, lblock, false);
    // A newly generated READ token works again.
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, true);
    // A token minted for a different block must be rejected.
    ExtendedBlock wrongBlock = new ExtendedBlock(
        lblock.getBlock().getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
    lblock.setBlockToken(sm.generateToken(wrongBlock,
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, false);
    // A token lacking READ access must be rejected.
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
            BlockTokenSecretManager.AccessMode.COPY,
            BlockTokenSecretManager.AccessMode.REPLACE)));
    tryRead(conf, lblock, false);
    // Lengthen the lifetime. The streams' cached tokens are the old expired
    // ones, yet re-reading still succeeds (fresh tokens are fetched).
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(in2.seekToNewSource(0));
    assertTrue(checkFile1(in2));
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));
    // Restart the datanodes, then shut the NN down: the streams now hold
    // unexpired tokens and reads must keep working without the NN.
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));
    // Bounce the NN (restart then shut down again): reads still succeed.
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
    // Restart the datanodes and then shut the NN down: reads now fail,
    // and recover only once the NN is back up.
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertFalse(checkFile1(in1));
    assertFalse(checkFile2(in3));
    cluster.restartNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
    // A datanode restart with the NN up also leaves all reads working.
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that APPEND operation can handle token expiration when
 * re-establishing pipeline is needed
 *
 * Fix: restored the stripped generic type on the token declaration
 * (raw Token -> Token&lt;BlockTokenIdentifier&gt;).
 */
@Test public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // Short token lifetime so the pipeline's token expires mid-append.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();
    // Write the first byte, then append the middle of the data.
    FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes,
        BLOCK_SIZE);
    stm.write(rawData, 0, 1);
    stm.close();
    stm = fs.append(fileToAppend);
    int mid = rawData.length - 1;
    stm.write(rawData, 1, mid - 1);
    stm.hflush();
    // Wait for the block token held by the output stream to expire.
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // Stopping a datanode forces pipeline re-establishment; writing and
    // closing must still succeed despite the expired token.
    cluster.stopDataNode(0);
    stm.write(rawData, mid, rawData.length - mid);
    stm.close();
    // The full file content must be intact.
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that WRITE operation can handle token expiration when
 * re-establishing pipeline is needed
 *
 * Fix: restored the stripped generic type on the token declaration
 * (raw Token -> Token&lt;BlockTokenIdentifier&gt;).
 */
@Test public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // Short token lifetime so the pipeline's token expires mid-write.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();
    // Write most of the data and flush, leaving the stream open.
    FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes,
        BLOCK_SIZE);
    int mid = rawData.length - 1;
    stm.write(rawData, 0, mid);
    stm.hflush();
    // Wait for the block token held by the output stream to expire.
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // Stopping a datanode forces pipeline re-establishment; writing and
    // closing must still succeed despite the expired token.
    cluster.stopDataNode(0);
    stm.write(rawData, mid, rawData.length - mid);
    stm.close();
    // The full file content must be intact.
    FSDataInputStream in4 = fs.open(fileToWrite);
    assertTrue(checkFile1(in4));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
@Test public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
// Verifies that after a replica is corrupted, re-replication restores the
// replication factor across racks and no readable replica (other than the
// corrupted node's) holds bad data.
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
int fileLen=512;
final Path filePath=new Path("/testFile");
// Two racks, two datanodes each.
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,fileLen,REPLICATION_FACTOR,1L);
final String fileContent=DFSTestUtil.readFile(fs,filePath);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Corrupt the on-disk replica on the first datanode holding the block,
// then restart that node so the corruption gets reported.
int dnToCorrupt=DFSTestUtil.firstDnWithBlock(cluster,b);
assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt,b));
cluster.restartDataNode(dnToCorrupt);
DFSTestUtil.waitCorruptReplicas(fs,ns,filePath,b,1);
// Wait until re-replication restores 2 replicas on 2 racks.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Every readable replica except the corrupted node's must match the
// original file content.
for (int i=0; i < racks.length; i++) {
String blockContent=cluster.readBlockOnDataNode(i,b);
if (blockContent != null && i != dnToCorrupt) {
assertEquals("Corrupt replica",fileContent,blockContent);
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() throws Exception {
// Reduce replication on an over-replicated file, then decommission one of
// the /rack1 replica holders; the surviving replicas must still satisfy
// the rack placement policy.
Configuration conf=getConf();
short REPLICATION_FACTOR=5;
final Path filePath=new Path("/testFile");
// Set up empty include/exclude host files; the exclude file is rewritten
// later to trigger decommissioning.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
// Four /rack1 nodes and one /rack2 node.
String racks[]={"/rack1","/rack2","/rack1","/rack1","/rack1"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Drop the replication factor from 5 to 2, creating excess replicas.
REPLICATION_FACTOR=2;
fs.setReplication(filePath,REPLICATION_FACTOR);
// Decommission one of the /rack1 replica holders via the exclude file.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
for ( String top : locs[0].getTopologyPaths()) {
if (!top.startsWith("/rack2")) {
// Strip the "/rack1/" prefix to obtain the bare node name.
String name=top.substring("/rack1".length() + 1);
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
break;
}
}
// Two replicas on two racks must remain once the excess is trimmed.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNodeDecomissionRespectsRackPolicy() throws Exception {
// Decommission a replica holder and verify that the replacement replica
// still satisfies the rack placement policy (2 replicas, 2 racks).
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
// Set up empty include/exclude host files; the exclude file is rewritten
// later to trigger decommissioning.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
// Two racks, two datanodes each.
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Decommission the first node reported to hold the block.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
String name=locs[0].getNames()[0];
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
// The re-replicated block must again span two racks.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercise a single datanode's cached-blocks lists: add, addFirst, remove
 * and clear, verifying iteration order after each mutation and that the
 * pending-cached / pending-uncached lists stay untouched.
 *
 * Fixes: restored the stripped generic type on the iterator (raw Iterator
 * -> Iterator&lt;CachedBlock&gt;), and the final assertion now checks the
 * list that was actually cleared (getCached) — the original checked
 * getPendingCached, contradicting its own failure message.
 */
@Test(timeout=60000) public void testSingleList() {
  DatanodeDescriptor dn = new DatanodeDescriptor(
      new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
  CachedBlock[] blocks = new CachedBlock[]{
      new CachedBlock(0L, (short) 1, true),
      new CachedBlock(1L, (short) 1, true),
      new CachedBlock(2L, (short) 1, true)};
  // All three lists start empty.
  Assert.assertFalse("expected pending cached list to start off empty.",
      dn.getPendingCached().iterator().hasNext());
  Assert.assertFalse("expected cached list to start off empty.",
      dn.getCached().iterator().hasNext());
  Assert.assertFalse("expected pending uncached list to start off empty.",
      dn.getPendingUncached().iterator().hasNext());
  // Adding to "cached" must not affect the other lists.
  Assert.assertTrue(dn.getCached().add(blocks[0]));
  Assert.assertFalse("expected pending cached list to still be empty.",
      dn.getPendingCached().iterator().hasNext());
  Assert.assertEquals("failed to insert blocks[0]", blocks[0],
      dn.getCached().iterator().next());
  Assert.assertFalse("expected pending uncached list to still be empty.",
      dn.getPendingUncached().iterator().hasNext());
  // add() appends: order is [0, 1].
  Assert.assertTrue(dn.getCached().add(blocks[1]));
  Iterator<CachedBlock> iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertFalse(iter.hasNext());
  // addFirst() prepends: order is [2, 0, 1].
  Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertFalse(iter.hasNext());
  // remove() from the middle leaves [2, 1].
  Assert.assertTrue(dn.getCached().remove(blocks[0]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertFalse(iter.hasNext());
  // clear() empties the cached list.
  dn.getCached().clear();
  Assert.assertFalse("expected cached list to be empty after clear.",
      dn.getCached().iterator().hasNext());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercise CorruptReplicasMap: size() counts distinct corrupt blocks (not
 * (block, datanode) pairs), and getCorruptReplicaBlockIds supports paged
 * retrieval with bounds checking on n.
 *
 * Fix: restored the stripped generic type on the id list (raw
 * List/LinkedList -> List&lt;Long&gt;); the enhanced-for over Long does not
 * compile against a raw List.
 */
@Test public void testCorruptReplicaInfo() throws IOException, InterruptedException {
  CorruptReplicasMap crm = new CorruptReplicasMap();
  // Empty map: size 0; out-of-range n returns null; n == 0 returns an
  // empty array.
  assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
  assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
  assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
  long[] l = crm.getCorruptReplicaBlockIds(0, null);
  assertNotNull("n = 0 must return non-null", l);
  assertEquals("n = 0 must return an empty list", 0, l.length);
  int NUM_BLOCK_IDS = 140;
  List<Long> block_ids = new LinkedList<Long>();
  for (int i = 0; i < NUM_BLOCK_IDS; i++) {
    block_ids.add((long) i);
  }
  DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
  // size() counts distinct blocks: reporting block 1 from a second
  // datanode must not increase the count.
  addToCorruptReplicasMap(crm, getBlock(0), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn2);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(1));
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(0));
  assertEquals("Number of corrupt blocks not returning correctly", 0, crm.size());
  // Bulk-insert 140 blocks and check paged retrieval of their ids.
  for (Long block_id : block_ids) {
    addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
  }
  assertEquals("Number of corrupt blocks not returning correctly",
      NUM_BLOCK_IDS, crm.size());
  assertTrue("First five block ids not returned correctly ",
      Arrays.equals(new long[]{0, 1, 2, 3, 4},
          crm.getCorruptReplicaBlockIds(5, null)));
  LOG.info(crm.getCorruptReplicaBlockIds(10, 7L));
  LOG.info(block_ids.subList(7, 18));
  assertTrue("10 blocks after 7 not returned correctly ",
      Arrays.equals(new long[]{8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
          crm.getCorruptReplicaBlockIds(10, 7L)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that DatanodeDescriptor's block counter tracks adds and removes on
 * its storages, ignoring duplicate adds and removals of unknown blocks.
 */
@Test public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dnDesc = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dnDesc.numBlocks());
  BlockInfo blockA = new BlockInfo(new Block(1L), 1);
  BlockInfo blockB = new BlockInfo(new Block(2L), 2);
  DatanodeStorageInfo[] storages = dnDesc.getStorageInfos();
  assertTrue(storages.length > 0);
  final String storageID = storages[0].getStorageID();
  // Adding a new block bumps the counter.
  assertTrue(storages[0].addBlock(blockA));
  assertEquals(1, dnDesc.numBlocks());
  // Removing a block that was never added is a no-op.
  assertFalse(dnDesc.removeBlock(blockB));
  assertEquals(1, dnDesc.numBlocks());
  // Re-adding the same block is rejected and leaves the count unchanged.
  assertFalse(storages[0].addBlock(blockA));
  assertEquals(1, dnDesc.numBlocks());
  assertTrue(storages[0].addBlock(blockB));
  assertEquals(2, dnDesc.numBlocks());
  // Removals decrement the counter back down to zero.
  assertTrue(dnDesc.removeBlock(blockA));
  assertEquals(1, dnDesc.numBlocks());
  assertTrue(dnDesc.removeBlock(blockB));
  assertEquals(0, dnDesc.numBlocks());
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Every registered datanode must be reported as contained; null and an
 * unregistered node must not.
 */
@Test public void testContains() throws Exception {
  DatanodeDescriptor unregistered =
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  for (DatanodeDescriptor registered : dataNodes) {
    assertTrue(map.contains(registered));
  }
  assertFalse(map.contains(null));
  assertFalse(map.contains(unregistered));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify Host2NodesMap removal semantics: removing unknown/null nodes is a
 * no-op, removed nodes stop resolving by host, and a host with two nodes
 * keeps resolving until both are removed.
 *
 * Fix: the first post-removal lookup used "1.1.1.1." (trailing-dot typo),
 * which queried a host that never existed and made the assertion vacuous;
 * corrected to "1.1.1.1" to actually verify the removal.
 */
@Test public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap =
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));
  // Remove the node on 1.1.1.1; lookups for that host must now miss.
  assertTrue(map.remove(dataNodes[0]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertTrue(map.getDatanodeByHost("2.2.2.2") == dataNodes[1]);
  // 3.3.3.3 hosts two nodes; either may be returned.
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  // Removing one of the two 3.3.3.3 nodes leaves the other resolvable.
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  // null removal is a no-op; removing twice returns false the second time.
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Host lookups resolve to the registered descriptors; a host with two
 * nodes may return either; unknown hosts yield null.
 */
@Test public void testGetDatanodeByHost() throws Exception {
  assertEquals(dataNodes[0], map.getDatanodeByHost("1.1.1.1"));
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  DatanodeDescriptor resolved = map.getDatanodeByHost("3.3.3.3");
  assertTrue(resolved == dataNodes[2] || resolved == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
}
InternalCallVerifier BooleanVerifier
/**
 * Checks HostSet's match / matchedBy relations as entries of increasing
 * generality are added: a port-qualified entry, then a bare-address entry,
 * then a port-qualified entry for a second address.
 */
@Test public void testRelation() {
  HostFileManager.HostSet set = new HostFileManager.HostSet();

  // Only 127.0.0.1:123 present: match() is exact; matchedBy() of the bare
  // address covers any port on that address.
  set.add(entry("127.0.0.1:123"));
  Assert.assertTrue(set.match(entry("127.0.0.1:123")));
  Assert.assertFalse(set.match(entry("127.0.0.1:12")));
  Assert.assertFalse(set.match(entry("127.0.0.1")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.1:12")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1:123")));
  Assert.assertFalse(set.match(entry("127.0.0.2")));
  Assert.assertFalse(set.match(entry("127.0.0.2:123")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.2")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.2:123")));

  // Adding the bare address makes the set match any port on 127.0.0.1.
  set.add(entry("127.0.0.1"));
  Assert.assertTrue(set.match(entry("127.0.0.1:123")));
  Assert.assertTrue(set.match(entry("127.0.0.1:12")));
  Assert.assertTrue(set.match(entry("127.0.0.1")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.1:12")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1:123")));
  Assert.assertFalse(set.match(entry("127.0.0.2")));
  Assert.assertFalse(set.match(entry("127.0.0.2:123")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.2")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.2:123")));

  // A port-qualified entry for a second address behaves like the first
  // stanza did for 127.0.0.1:123, while the 127.0.0.1 results are unchanged.
  set.add(entry("127.0.0.2:123"));
  Assert.assertTrue(set.match(entry("127.0.0.1:123")));
  Assert.assertTrue(set.match(entry("127.0.0.1:12")));
  Assert.assertTrue(set.match(entry("127.0.0.1")));
  Assert.assertFalse(set.matchedBy(entry("127.0.0.1:12")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.1:123")));
  Assert.assertFalse(set.match(entry("127.0.0.2")));
  Assert.assertTrue(set.match(entry("127.0.0.2:123")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.2")));
  Assert.assertTrue(set.matchedBy(entry("127.0.0.2:123")));
}
InternalCallVerifier BooleanVerifier
/**
 * Verify that the namesystem's replica accounting (excess and live counts)
 * follows datanode deaths and restarts: a dead node triggers
 * re-replication, its return creates excess replicas, and counts converge
 * to the expected values within the timeout.
 *
 * Fix: restored the stripped generic type on the excess-replica collection
 * (raw Collection -> Collection&lt;Block&gt;).
 */
@Test public void testNodeCount() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    final FileSystem fs = cluster.getFileSystem();
    // Create a fully replicated file.
    final Path FILE_PATH = new Path("/testfile");
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    final DatanodeDescriptor[] datanodes = hm.getDatanodes();
    // Bring up two extra datanodes, then kill one original replica holder
    // so the block is re-replicated; restarting it creates an excess.
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();
    DatanodeDescriptor datanode = datanodes[0];
    DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
    BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
        datanode.getXferAddr());
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    cluster.restartDataNode(dnprop);
    cluster.waitActive();
    initializeTimeout(TIMEOUT);
    while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() == 0) {
      checkTimeout("excess replicas not detected");
    }
    // Find a datanode holding the block that is NOT marked excess.
    DatanodeDescriptor nonExcessDN = null;
    for (DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) {
      final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
      Collection<Block> blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid());
      if (blocks == null || !blocks.contains(block.getLocalBlock())) {
        nonExcessDN = dn;
        break;
      }
    }
    assertTrue(nonExcessDN != null);
    // Kill the non-excess node; the live replica count should settle back
    // at the replication factor once the NN notices the death.
    dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
    BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
        nonExcessDN.getXferAddr());
    initializeTimeout(TIMEOUT);
    while (countNodes(block.getLocalBlock(), namesystem).liveReplicas()
        != REPLICATION_FACTOR) {
      checkTimeout("live replica count not correct", 1000);
    }
    // Bringing it back yields two excess replicas.
    cluster.restartDataNode(dnprop);
    cluster.waitActive();
    initializeTimeout(TIMEOUT);
    while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() != 2) {
      checkTimeout("excess replica count not equal to 2");
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that processOverReplicatedBlock() can handle corrupt replicas.
 * It makes sure corrupt replicas are not treated as valid ones, which
 * would otherwise let the NN delete valid replicas while keeping the
 * corrupt ones.
 */
@Test public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf=new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs=cluster.getFileSystem();
  try {
    // Create a replication-3 file and corrupt the replica on DN 0.
    final Path fileName=new Path("/foo1");
    DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
    DFSTestUtil.waitReplication(fs,fileName,(short)3);
    ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
    // Stop DN 0 and remove its block-scanner log so the corrupt replica is
    // re-scanned after restart; allow up to one minute for the delete.
    DataNodeProperties dnProps=cluster.stopDataNode(0);
    File scanLog=new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0,0),cluster.getNamesystem().getBlockPoolId()).getParent().toString() + "/../dncp_block_verification.log.prev");
    for (int i=0; !scanLog.delete(); i++) {
      assertTrue("Could not delete log file in one minute",i < 60);
      try {
        Thread.sleep(1000);
      }
      catch ( InterruptedException ignored) {
        // Best-effort wait between delete retries; the i < 60 guard bounds it.
      }
    }
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs,fileName,(short)2);
    String blockPoolId=cluster.getNamesystem().getBlockPoolId();
    final DatanodeID corruptDataNode=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2),blockPoolId);
    final FSNamesystem namesystem=cluster.getNamesystem();
    final BlockManager bm=namesystem.getBlockManager();
    final HeartbeatManager hm=bm.getDatanodeManager().getHeartbeatManager();
    // Acquire the lock BEFORE entering the try: if writeLock() itself threw
    // inside the try, the finally would call writeUnlock() without holding
    // the lock and mask the original exception.
    namesystem.writeLock();
    try {
      synchronized (hm) {
        // Fill up every non-corrupt datanode so the corrupt node is the
        // only candidate for excess-replica deletion.
        String corruptMachineName=corruptDataNode.getXferAddr();
        for ( DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L,100L,0,100L);
            datanode.updateHeartbeat(BlockManagerTestUtil.getStorageReportsForDatanode(datanode),0L,0L,0,0);
          }
        }
        // Shrink replication to 1; the surviving live replica must be a
        // valid (non-corrupt) one.
        NameNodeAdapter.setReplication(namesystem,fileName.toString(),(short)1);
        assertEquals(1,bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    }
    finally {
      namesystem.writeUnlock();
    }
  }
  finally {
    cluster.shutdown();
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises PendingReplicationBlocks: increment/decrement bookkeeping,
 * per-block replica counts, and the timeout path that moves entries to
 * the timed-out list.
 */
@Test public void testPendingReplication(){
  PendingReplicationBlocks pendingReplications;
  pendingReplications=new PendingReplicationBlocks(TIMEOUT * 1000);
  pendingReplications.start();
  // Stage 10 blocks; block i is pending on i targets.
  DatanodeStorageInfo[] storages=DFSTestUtil.createDatanodeStorageInfos(10);
  for (int i=0; i < storages.length; i++) {
    Block block=new Block(i,i,0);
    DatanodeStorageInfo[] targets=new DatanodeStorageInfo[i];
    System.arraycopy(storages,0,targets,0,i);
    pendingReplications.increment(block,DatanodeStorageInfo.toDatanodeDescriptors(targets));
  }
  assertEquals("Size of pendingReplications ",10,pendingReplications.size());
  // Remove one pending target from block 8 and verify the count drops.
  Block blk=new Block(8,8,0);
  pendingReplications.decrement(blk,storages[7].getDatanodeDescriptor());
  assertEquals("pendingReplications.getNumReplicas ",7,pendingReplications.getNumReplicas(blk));
  // Draining the remaining targets removes the block entirely.
  // (assertEquals instead of assertTrue(x == y) so failures report values.)
  for (int i=0; i < 7; i++) {
    pendingReplications.decrement(blk,storages[i].getDatanodeDescriptor());
  }
  assertEquals(9,pendingReplications.size());
  pendingReplications.increment(blk,DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(8)));
  assertEquals(10,pendingReplications.size());
  for (int i=0; i < 10; i++) {
    Block block=new Block(i,i,0);
    assertEquals(i,pendingReplications.getNumReplicas(block));
  }
  // Nothing should have timed out yet.
  assertNull(pendingReplications.getTimedOutBlocks());
  try {
    Thread.sleep(1000);
  }
  catch ( Exception ignored) {
    // Timing is approximate; an early wake-up is harmless.
  }
  // Add 5 more blocks after a delay, then wait for everything to time out.
  for (int i=10; i < 15; i++) {
    Block block=new Block(i,i,0);
    pendingReplications.increment(block,DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(i)));
  }
  assertEquals(15,pendingReplications.size());
  int loop=0;
  while (pendingReplications.size() > 0) {
    try {
      Thread.sleep(1000);
    }
    catch ( Exception ignored) {
      // Keep polling until the pending list drains.
    }
    loop++;
  }
  System.out.println("Had to wait for " + loop + " seconds for the lot to timeout");
  assertEquals("Size of pendingReplications ",0,pendingReplications.size());
  // All 15 staged blocks (ids 0..14) must appear in the timed-out list.
  Block[] timedOut=pendingReplications.getTimedOutBlocks();
  assertNotNull(timedOut);
  assertEquals(15,timedOut.length);
  for (int i=0; i < timedOut.length; i++) {
    assertTrue(timedOut[i].getBlockId() < 15);
  }
  pendingReplications.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test when a block's replica is removed from RBW folder in one of the
 * datanode, namenode should ask to invalidate that corrupted block and
 * schedule replication for one more replica for that under replicated block.
 */
@Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
// Deleting block files out from under a running DN relies on POSIX file
// semantics; skip on Windows.
assumeTrue(!Path.WINDOWS);
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
// Short block-report / directory-scan / heartbeat intervals so the NN
// notices the missing replica quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FSDataOutputStream out=null;
try {
final FSNamesystem namesystem=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
// Write + hsync without closing, so the block stays in the RBW state.
Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
out=fs.create(testPath,(short)2);
out.writeBytes("HDFS-3157: " + testPath);
out.hsync();
cluster.startDataNodes(conf,1,true,null,null,null);
String bpid=namesystem.getBlockPoolId();
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
Block block=blk.getLocalBlock();
DataNode dn=cluster.getDataNodes().get(0);
// Simulate replica loss: delete both the block file and its meta file
// from the first DN's RBW folder, then close the stream.
File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
out.close();
int liveReplicas=0;
// Phase 1: wait until the NN notices the corruption (live count < 2).
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
// Phase 2: wait for re-replication to restore the live count to 2.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas",2,liveReplicas);
// Phase 3: the corrupt replica must eventually be invalidated (count 0).
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
}
finally {
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * and the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
  // Writer on /d2/r4 is not one of the cluster's datanodes.
  DatanodeDescriptor writerDesc=DFSTestUtil.getDatanodeDescriptor("7.7.7.7","/d2/r4");
  DatanodeStorageInfo[] targets;
  // JUnit assertEquals takes (expected, actual); the original reversed
  // order produces misleading failure messages.
  targets=chooseTarget(0,writerDesc);
  assertEquals(0,targets.length);
  targets=chooseTarget(1,writerDesc);
  assertEquals(1,targets.length);
  targets=chooseTarget(2,writerDesc);
  assertEquals(2,targets.length);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(3,writerDesc);
  assertEquals(3,targets.length);
  assertTrue(isOnSameRack(targets[1],targets[2]));
  assertFalse(isOnSameRack(targets[0],targets[1]));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a stale datanode is avoided as a write target while
 * stale-node avoidance is in effect.
 */
@Test public void testChooseTargetWithStaleNodes() throws Exception {
  // Mark dataNodes[0] stale and let the heartbeat check register it.
  dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
  namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual) order for correct failure output.
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertEquals(storages[1],targets[0]);
  Set excludedNodes=new HashSet();
  excludedNodes.add(dataNodes[1]);
  List chosenNodes=new ArrayList();
  targets=chooseTarget(1,chosenNodes,excludedNodes);
  assertEquals(1,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  // Restore freshness so later tests are unaffected.
  dataNodes[0].setLastUpdate(Time.now());
  namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test public void testChooseTarget3() throws Exception {
  // Leave dataNodes[0] with too little remaining space to be a target.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed,
  // which yields misleading failure messages.
  targets=chooseTarget(0);
  assertEquals(0,targets.length);
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertEquals(storages[1],targets[0]);
  targets=chooseTarget(2);
  assertEquals(2,targets.length);
  assertEquals(storages[1],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(3);
  assertEquals(3,targets.length);
  assertEquals(storages[1],targets[0]);
  assertTrue(isOnSameRack(targets[1],targets[2]));
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(4);
  assertEquals(4,targets.length);
  assertEquals(storages[1],targets[0]);
  for (int i=1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0],targets[i]));
  }
  assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
  assertFalse(isOnSameRack(targets[1],targets[3]));
  // Restore dataNodes[0]'s free space for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 * @throws Exception
 */
@Test public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Mark the first three nodes stale.
  for (int i=0; i < 3; i++) {
    dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  // assertEquals uses (expected, actual); the originals were reversed.
  DatanodeStorageInfo[] targets=chooseTarget(0);
  assertEquals(0,targets.length);
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertFalse(containsWithinRange(targets[0],dataNodes,0,2));
  targets=chooseTarget(2);
  assertEquals(2,targets.length);
  assertFalse(containsWithinRange(targets[0],dataNodes,0,2));
  assertFalse(containsWithinRange(targets[1],dataNodes,0,2));
  targets=chooseTarget(3);
  assertEquals(3,targets.length);
  assertTrue(containsWithinRange(targets[0],dataNodes,3,5));
  assertTrue(containsWithinRange(targets[1],dataNodes,3,5));
  assertTrue(containsWithinRange(targets[2],dataNodes,3,5));
  // With 4 replicas requested there are only 3 healthy nodes, so a stale
  // node must be chosen as well; the healthy ones all still appear.
  targets=chooseTarget(4);
  assertEquals(4,targets.length);
  assertTrue(containsWithinRange(dataNodes[3],targets,0,3));
  assertTrue(containsWithinRange(dataNodes[4],targets,0,3));
  assertTrue(containsWithinRange(dataNodes[5],targets,0,3));
  // Restore freshness so later tests are unaffected.
  for (int i=0; i < dataNodes.length; i++) {
    dataNodes[i].setLastUpdate(Time.now());
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Stale-node avoidance should switch off once more than half the cluster is
 * stale, and switch back on when enough nodes recover. Runs against a real
 * MiniDFSCluster (6 nodes across 3 racks) rather than the shared fixture.
 */
@Test public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,true);
String[] hosts=new String[]{"host1","host2","host3","host4","host5","host6"};
String[] racks=new String[]{"/d1/r1","/d1/r1","/d1/r2","/d1/r2","/d2/r3","/d2/r3"};
MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts).numDataNodes(hosts.length).build();
miniCluster.waitActive();
try {
// Make 2 of 6 nodes stale (heartbeats disabled so they stay stale).
for (int i=0; i < 2; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,true);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
// 2/6 stale: avoidance is still active, so targets skip the stale rack.
int numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes,2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
DatanodeDescriptor staleNodeInfo=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
BlockPlacementPolicy replicator=miniCluster.getNameNode().getNamesystem().getBlockManager().getBlockPlacementPolicy();
DatanodeStorageInfo[] targets=replicator.chooseTarget(filename,3,staleNodeInfo,new ArrayList(),false,null,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(targets.length,3);
assertFalse(isOnSameRack(targets[0],staleNodeInfo));
// Push staleness past half the cluster (4/6 stale).
for (int i=0; i < 4; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,true);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
// More than half stale: avoidance turns off, stale nodes become eligible.
numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes,4);
assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
targets=replicator.chooseTarget(filename,3,staleNodeInfo,new ArrayList(),false,null,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(targets.length,3);
assertTrue(isOnSameRack(targets[0],staleNodeInfo));
// Recover two nodes (back to 2/6 stale): avoidance resumes.
for (int i=2; i < 4; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,false);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now());
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes,2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
targets=chooseTarget(3,staleNodeInfo);
assertEquals(targets.length,3);
assertFalse(isOnSameRack(targets[0],staleNodeInfo));
}
finally {
miniCluster.shutdown();
}
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica,
 * @throws Exception
 */
@Test public void testChoooseTarget4() throws Exception {
  // Disqualify both rack-1 nodes by leaving them with too little space.
  for (int i=0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed.
  targets=chooseTarget(0);
  assertEquals(0,targets.length);
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  targets=chooseTarget(2);
  assertEquals(2,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(3);
  assertEquals(3,targets.length);
  for (int i=0; i < 3; i++) {
    assertFalse(isOnSameRack(targets[i],dataNodes[0]));
  }
  assertTrue(isOnSameRack(targets[0],targets[1]) || isOnSameRack(targets[1],targets[2]));
  assertFalse(isOnSameRack(targets[0],targets[2]));
  // Restore rack-1 capacities for subsequent tests.
  for (int i=0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * This testcase tests whether an IllegalArgumentException
 * will be thrown when a value greater than 1 is retrieved by
 * DFSUtil#getInvalidateWorkPctPerIteration
 */
@Test public void testGetInvalidateWorkPctPerIteration_GreaterThanOne(){
  final Configuration config=new Configuration();
  // The built-in default must already be a positive fraction.
  final float defaultPct=DFSUtil.getInvalidateWorkPctPerIteration(config);
  assertTrue(defaultPct > 0);
  // A ratio above 1.0 is out of range and must be rejected.
  config.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"1.5f");
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(config);
}
InternalCallVerifier BooleanVerifier
/**
 * Test for the high priority blocks are processed before the low priority
 * blocks.
 */
@Test(timeout=60000) public void testReplicationWithPriority() throws Exception {
  final int replicationIntervalMs=1000;
  final int highPriority=0;
  Configuration conf=new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,1);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  try {
    cluster.waitActive();
    final UnderReplicatedBlocks neededReplications=cluster.getNameNode().getNamesystem().getBlockManager().neededReplications;
    // Flood the queue with 100 low-priority under-replicated blocks.
    for (int i=0; i < 100; i++) {
      neededReplications.add(new Block(random.nextLong()),2,0,3);
    }
    Thread.sleep(replicationIntervalMs);
    // Add a single high-priority block after the backlog exists.
    neededReplications.add(new Block(random.nextLong()),1,0,3);
    Thread.sleep(replicationIntervalMs);
    // The high-priority entry must be drained ahead of the backlog.
    assertFalse("Not able to clear the element from high priority list",neededReplications.iterator(highPriority).hasNext());
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved
 */
@Test public void testGetReplWorkMultiplier(){
  Configuration conf=new Configuration();
  int blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
  assertTrue(blocksReplWorkMultiplier > 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
  blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
  // assertEquals takes (expected, actual); the original was reversed.
  assertEquals(3,blocksReplWorkMultiplier);
  // A non-positive multiplier must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
  List chosenNodes=new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed.
  targets=chooseTarget(0,chosenNodes);
  assertEquals(0,targets.length);
  targets=chooseTarget(1,chosenNodes);
  assertEquals(1,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  targets=chooseTarget(2,chosenNodes);
  assertEquals(2,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[0]));
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(3,chosenNodes);
  assertEquals(3,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[0]));
  assertFalse(isOnSameRack(targets[0],targets[2]));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node
 * of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test public void testChooseTarget1() throws Exception {
  // Give dataNodes[0] some in-flight xceivers (load 4) but enough space.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,4,0);
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed.
  targets=chooseTarget(0);
  assertEquals(0,targets.length);
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertEquals(storages[0],targets[0]);
  targets=chooseTarget(2);
  assertEquals(2,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  targets=chooseTarget(3);
  assertEquals(3,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  assertTrue(isOnSameRack(targets[1],targets[2]));
  targets=chooseTarget(4);
  assertEquals(4,targets.length);
  assertEquals(storages[0],targets[0]);
  assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
  assertFalse(isOnSameRack(targets[0],targets[2]));
  // Reset dataNodes[0]'s load for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
  Set excludedNodes;
  DatanodeStorageInfo[] targets;
  List chosenNodes=new ArrayList();
  // assertEquals uses (expected, actual); the originals were reversed.
  excludedNodes=new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets=chooseTarget(0,chosenNodes,excludedNodes);
  assertEquals(0,targets.length);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets=chooseTarget(1,chosenNodes,excludedNodes);
  assertEquals(1,targets.length);
  assertEquals(storages[0],targets[0]);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets=chooseTarget(2,chosenNodes,excludedNodes);
  assertEquals(2,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets=chooseTarget(3,chosenNodes,excludedNodes);
  assertEquals(3,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  assertTrue(isOnSameRack(targets[1],targets[2]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets=chooseTarget(4,chosenNodes,excludedNodes);
  assertEquals(4,targets.length);
  assertEquals(storages[0],targets[0]);
  for (int i=1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0],targets[i]));
  }
  assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
  assertFalse(isOnSameRack(targets[1],targets[3]));
  // With returnChosenNodes=true the pre-chosen storages[2] must be part of
  // the returned pipeline.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets=replicator.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2,targets.length);
  // Readable membership check instead of the original empty-body for loop.
  boolean chosenStillPresent=false;
  for ( DatanodeStorageInfo target : targets) {
    if (storages[2].equals(target)) {
      chosenStillPresent=true;
      break;
    }
  }
  assertTrue(chosenStillPresent);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
  List chosenNodes=new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[2]);
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed.
  targets=chooseTarget(0,chosenNodes);
  assertEquals(0,targets.length);
  targets=chooseTarget(1,chosenNodes);
  assertEquals(1,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[0]));
  assertFalse(isOnSameRack(targets[0],dataNodes[2]));
  // Writer on dataNodes[2] should pull the new replica onto its own rack.
  targets=chooseTarget(1,dataNodes[2],chosenNodes);
  assertEquals(1,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[2]));
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  targets=chooseTarget(2,chosenNodes);
  assertEquals(2,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[0]));
  targets=chooseTarget(2,dataNodes[2],chosenNodes);
  assertEquals(2,targets.length);
  assertTrue(isOnSameRack(targets[0],dataNodes[2]));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Disqualify two nodes so only NUM_OF_DATANODES - 2 remain eligible.
  for (int i=0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  // Capture root-logger output to verify the shortfall warning.
  final LogVerificationAppender appender=new LogVerificationAppender();
  final Logger logger=Logger.getRootLogger();
  logger.addAppender(appender);
  DatanodeStorageInfo[] targets=chooseTarget(NUM_OF_DATANODES);
  // assertEquals uses (expected, actual); the original was reversed.
  assertEquals(NUM_OF_DATANODES - 2,targets.length);
  final List log=appender.getLog();
  assertNotNull(log);
  assertFalse(log.isEmpty());
  // The last log entry must be a WARN-or-worse mentioning the 2-node gap.
  final LoggingEvent lastLogEntry=log.get(log.size() - 1);
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
  // Restore the two nodes' capacities for subsequent tests.
  for (int i=0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved
 */
@Test public void testGetInvalidateWorkPctPerIteration(){
  Configuration conf=new Configuration();
  float blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(blocksInvalidateWorkPct > 0);
  // assertEquals takes (expected, actual, delta); the originals reversed the
  // first two and derived the delta from the value under test, which makes
  // the tolerance depend on the (possibly wrong) actual value. Use a fixed
  // epsilon instead.
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.5f");
  blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(0.5f,blocksInvalidateWorkPct,1e-7f);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"1.0f");
  blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f,blocksInvalidateWorkPct,1e-7f);
  // Exactly zero is invalid and must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.0f");
  exception.expect(IllegalArgumentException.class);
  blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
  List chosenNodes=new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  // assertEquals uses (expected, actual); the originals were reversed.
  targets=chooseTarget(0,chosenNodes);
  assertEquals(0,targets.length);
  targets=chooseTarget(1,chosenNodes);
  assertEquals(1,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  targets=chooseTarget(2,chosenNodes);
  assertEquals(2,targets.length);
  assertFalse(isOnSameRack(targets[0],dataNodes[0]));
  assertFalse(isOnSameRack(targets[1],dataNodes[0]));
}
APIUtilityVerifier BooleanVerifier
/**
 * This testcase tests whether an IllegalArgumentException
 * will be thrown when a negative value is retrieved by
 * DFSUtil#getInvalidateWorkPctPerIteration
 */
@Test public void testGetInvalidateWorkPctPerIteration_NegativeValue(){
  final Configuration config=new Configuration();
  // The built-in default must already be a positive fraction.
  final float defaultPct=DFSUtil.getInvalidateWorkPctPerIteration(config);
  assertTrue(defaultPct > 0);
  // A negative ratio is out of range and must be rejected.
  config.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"-0.5f");
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(config);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test public void testChooseTargetWithDecomNodes() throws IOException {
// All state mutation happens under the namesystem write lock; the finally
// block undoes the decommissions and releases the lock.
namenode.getNamesystem().writeLock();
try {
String blockPoolId=namenode.getNamesystem().getBlockPoolId();
// Report xceiver counts of 2, 4 and 4 on nodes 3..5 via heartbeats.
dnManager.handleHeartbeat(dnrList.get(3),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),blockPoolId,dataNodes[3].getCacheCapacity(),dataNodes[3].getCacheRemaining(),2,0,0);
dnManager.handleHeartbeat(dnrList.get(4),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),blockPoolId,dataNodes[4].getCacheCapacity(),dataNodes[4].getCacheRemaining(),4,0,0);
dnManager.handleHeartbeat(dnrList.get(5),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),blockPoolId,dataNodes[5].getCacheCapacity(),dataNodes[5].getCacheRemaining(),4,0,0);
// With all 6 nodes in service the average spreads over 6.
final int load=2 + 4 + 4;
FSNamesystem fsn=namenode.getNamesystem();
assertEquals((double)load / 6,fsn.getInServiceXceiverAverage(),EPSILON);
// Decommission nodes 0..2; the average must now spread over only the
// 3 remaining in-service nodes.
for (int i=0; i < 3; i++) {
DatanodeDescriptor d=dnManager.getDatanode(dnrList.get(i));
dnManager.startDecommission(d);
d.setDecommissioned();
}
assertEquals((double)load / 3,fsn.getInServiceXceiverAverage(),EPSILON);
// chooseTarget must pick exactly the three in-service nodes (3..5).
DatanodeStorageInfo[] targets=namenode.getNamesystem().getBlockManager().getBlockPlacementPolicy().chooseTarget("testFile.txt",3,dataNodes[0],new ArrayList(),false,null,1024,StorageType.DEFAULT);
assertEquals(3,targets.length);
Set targetSet=new HashSet(Arrays.asList(targets));
for (int i=3; i < storages.length; i++) {
assertTrue(targetSet.contains(storages[i]));
}
}
finally {
// Undo decommissions so the shared fixture is clean for other tests.
dataNodes[0].stopDecommission();
dataNodes[1].stopDecommission();
dataNodes[2].stopDecommission();
namenode.getNamesystem().writeUnlock();
}
NameNode.LOG.info("Done working on it");
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests replica placement when datanodes declare mutual host dependencies
 * (pairs 1&lt;-&gt;2 and 3&lt;-&gt;4). Dependent nodes must never be chosen
 * together, and after selection every datanode must appear in the excluded
 * set (either chosen or a dependent of a chosen node).
 * @throws Exception
 */
@Test public void testChooseTargetWithDependencies() throws Exception {
  // Remove the default datanodes from the topology.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  // Register the dependency-case datanodes with both the topology and the
  // host-to-datanode map, since dependencies are resolved by hostname.
  Host2NodesMap host2DatanodeMap = namenode.getNamesystem().getBlockManager().getDatanodeManager().getHost2DatanodeMap();
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    cluster.add(dataNodesForDependencies[i]);
    host2DatanodeMap.add(dataNodesForDependencies[i]);
  }
  // Declare mutual dependencies: 1<->2 and 3<->4.
  dataNodesForDependencies[1].addDependentHostName(dataNodesForDependencies[2].getHostName());
  dataNodesForDependencies[2].addDependentHostName(dataNodesForDependencies[1].getHostName());
  dataNodesForDependencies[3].addDependentHostName(dataNodesForDependencies[4].getHostName());
  dataNodesForDependencies[4].addDependentHostName(dataNodesForDependencies[3].getHostName());
  // Give every node enough free space to be a valid target.
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    updateHeartbeatWithUsage(dataNodesForDependencies[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  List chosenNodes = new ArrayList();
  DatanodeStorageInfo[] targets;
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodesForDependencies[5]);
  // Ask for 3 targets; only 2 are reachable because node 5 is excluded and
  // each dependency pair can contribute at most one node.
  targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
  // assertEquals takes (expected, actual) — the original had them reversed,
  // which produces misleading failure messages.
  assertEquals(2, targets.length);
  assertEquals(storagesForDependencies[1], targets[0]);
  assertTrue(targets[1].equals(storagesForDependencies[3]) || targets[1].equals(storagesForDependencies[4]));
  // Every datanode must now be excluded: chosen, dependent of a chosen
  // node, or explicitly excluded up front.
  assertEquals(NUM_OF_DATANODES_FOR_DEPENDENCIES, excludedNodes.size());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * The rest replicas can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
  setupDataNodeCapacity();
  // Replicas already exist on rack 1 (node 0) and another rack (node 3).
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[3]);
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Writer defaults to dataNodes[0]: single extra replica stays on the
  // writer's rack but off dataNodes[3]'s rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(dataNodes[3], targets[0]));
  // With dataNodes[3] as writer, the replica goes on its rack but in a
  // different node group.
  targets = chooseTarget(1, dataNodes[3], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  targets = chooseTarget(2, dataNodes[3], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * The rest replicas can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
  setupDataNodeCapacity();
  // Both existing replicas sit on rack 1 (nodes 0 and 1).
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // New replicas must leave rack 1 to satisfy rack diversity.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // At most one of the two new replicas may share rack 1 with the writer.
  assertFalse(isOnSameRack(dataNodes[0], targets[0]) && isOnSameRack(dataNodes[0], targets[1]));
}
APIUtilityVerifier BooleanVerifier
/**
 * Test re-replication policy in a boundary topology.
 * Rack 2 has a single node group whose node is already chosen, and one of
 * the two node groups on rack 1 is also chosen. The placement policy must
 * pick a node from rack 1 that is outside both chosen node groups.
 */
@Test public void testRereplicateOnBoundaryTopology() throws Exception {
  // Give every boundary-case node enough headroom to be eligible.
  final long room = 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE;
  for (int idx = 0; idx < NUM_OF_DATANODES_BOUNDARY; idx++) {
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[idx], room, 0L, room, 0L, 0L, 0L, 0, 0);
  }
  // Existing replicas: one per rack, pinning both chosen node groups.
  List existing = new ArrayList();
  existing.add(storagesInBoundaryCase[0]);
  existing.add(storagesInBoundaryCase[5]);
  DatanodeStorageInfo[] picked = chooseTarget(1, dataNodesInBoundaryCase[0], existing);
  // The new replica must sit outside both already-used node groups.
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], picked[0]));
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], picked[0]));
  assertTrue(checkTargetsOnDifferentNodeGroup(picked));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on a random rack.
 * The 2nd replica should be placed on a different node and node group on
 * the same rack as the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
  setupDataNodeCapacity();
  // One existing replica on the writer's node.
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Single extra replica must leave the writer's rack for rack diversity.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  // First new replica shares the writer's rack but not its node group.
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group and cannot host two replicas.
 * The 1st replica will be placed on the writer.
 * The 2nd replica should be placed on a different rack.
 * The 3rd replica should be placed on the same rack as the writer, but in a
 * different node group.
 */
@Test public void testChooseTargetsOnBoundaryTopology() throws Exception {
  // Replace the default topology with the boundary-case datanodes.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    // NOTE(review): dataNodes[0] is re-reported with low remaining space on
    // every iteration — presumably to keep it ineligible; confirm this loop
    // placement is intentional rather than a copy/paste of the setup line.
    updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(3, targets.length);
  // All three replicas must land in distinct node groups.
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, the client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
  setupDataNodeCapacity();
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0, NODE);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, NODE);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, NODE);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, NODE);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack; the 1st is on a different rack.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  // Node-group awareness: no two replicas may share a node group.
  verifyNoTwoTargetsOnSameNodeGroup(targets);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
  DatanodeStorageInfo[] targets;
  BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
  List chosenNodes = new ArrayList();
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets = repl.chooseTarget(filename,4,dataNodes[0],chosenNodes,false,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
  // assertEquals takes (expected, actual) — original had them reversed.
  assertEquals(4, targets.length);
  // Writer-local replica first.
  assertEquals(storages[0], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  // No later replica may share the writer's node group.
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // Second scenario: re-replication with storages[2] already chosen and
  // dataNodes[1] still excluded, returnChosenNodes=true.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = repl.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // The previously-chosen storage must appear somewhere in the result.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++) {
    // scanning for storages[2]
  }
  assertTrue(i < targets.length);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but in a different nodegroup,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test public void testChooseTarget3() throws Exception {
  // Report dataNodes[0] with too little remaining space to be a target.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  // Restore dataNodes[0] to a usable capacity for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica, but
 * in a different node group.
 * @throws Exception
 */
@Test public void testChooseTarget4() throws Exception {
  // Make all of rack 1 (nodes 0..2) ineligible by reporting low space.
  for (int i = 0; i < 3; i++) {
    updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  // No replica may fall back onto the writer's (full) rack.
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(dataNodes[0], targets[i]));
  }
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[0], targets[1]) || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test replica placement policy in case of targets more than number of
 * NodeGroups.
 * The 12-node cluster only has 6 NodeGroups, but in some cases, like
 * placing a submitted job file, there is a requirement to choose more (10)
 * targets for placing replicas. We should test it can return 6 targets.
 */
@Test public void testChooseMoreTargetsThanNodeGroups() throws Exception {
  // Clear out both the default and the boundary-case topologies.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    DatanodeDescriptor node = dataNodesInBoundaryCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  // Install the 12-node, 6-node-group topology.
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    cluster.add(dataNodesInMoreTargetsCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
  }
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  // Requesting 10 targets must cap out at one per node group, i.e. 6.
  targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  assertEquals(6, targets.length);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a
 * different rack and the third should be placed on a different node (and
 * node group) of the rack chosen for the 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test public void testChooseTarget1() throws Exception {
  // Report dataNodes[0] with a load of 4 xceivers (2nd-from-last arg).
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,4,0);
  DatanodeStorageInfo[] targets;
  // assertEquals takes (expected, actual) — original arguments were
  // reversed, producing misleading failure messages.
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  // 2nd and 3rd replicas share a rack but not a node group.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameNodeGroup(targets[1], targets[2]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  // Reset dataNodes[0] to zero load for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that adding blocks with different replication counts puts them
 * into different priority queues.
 * assertAdded arguments are (queues, block, curReplicas, decommissioned,
 * expectedReplicas).
 * @throws Throwable if something goes wrong
 */
@Test public void testBlockPriorities() throws Throwable {
UnderReplicatedBlocks queues=new UnderReplicatedBlocks();
Block block1=new Block(1);
Block block2=new Block(2);
Block block_very_under_replicated=new Block(3);
Block block_corrupt=new Block(4);
// 1 of 3 replicas: a last-replica block goes to the highest priority.
assertAdded(queues,block1,1,0,3);
assertEquals(1,queues.getUnderReplicatedBlockCount());
assertEquals(1,queues.size());
assertInLevel(queues,block1,UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
// Re-adding the same block must be a no-op.
assertFalse(queues.add(block1,1,0,3));
// 2 of 3 replicas: ordinary under-replication priority.
assertAdded(queues,block2,2,0,3);
assertEquals(2,queues.getUnderReplicatedBlockCount());
assertEquals(2,queues.size());
assertInLevel(queues,block2,UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
// 0 replicas: corrupt — counted in size() but not in the
// under-replicated count.
assertAdded(queues,block_corrupt,0,0,3);
assertEquals(3,queues.size());
assertEquals(2,queues.getUnderReplicatedBlockCount());
assertEquals(1,queues.getCorruptBlockSize());
assertInLevel(queues,block_corrupt,UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
// 4 of 25 replicas: below one third — very under-replicated queue.
assertAdded(queues,block_very_under_replicated,4,0,25);
assertInLevel(queues,block_very_under_replicated,UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Round-trips every {@link HdfsServerConstants.ReplicaState} value through
 * its write/read serialization and verifies the same enum constant comes
 * back.
 */
@Test public void testReadWriteReplicaState(){
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    for (HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState.values()) {
      repState.write(out);
      in.reset(out.getData(), out.getLength());
      HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState.read(in);
      // Enum identity comparison is sufficient for round-trip equality.
      assertTrue("testReadWrite error !!!", repState == result);
      // Reset buffers so each state is serialized in isolation.
      out.reset();
      in.reset();
    }
  }
  catch (Exception ex) {
    // Include the exception in the failure message — the original message
    // swallowed the cause, making failures undiagnosable.
    fail("testReadWrite ex error ReplicaState: " + ex);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test write a file, verifies and closes it. Then a couple of random blocks
 * is removed and BlockReport is forced; the FSNamesystem is pushed to
 * recalculate required DN's activities such as replications and so on.
 * The number of missing and under-replicated blocks should be the same in
 * case of a single-DN cluster.
 * @throws IOException in case of errors
 */
@Test(timeout=300000) public void blockReport_02() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
LOG.info("Running test " + METHOD_NAME);
Path filePath=new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
File dataDir=new File(cluster.getDataDirectory());
assertTrue(dataDir.isDirectory());
List blocks2Remove=new ArrayList();
List removedIndex=new ArrayList();
List lBlocks=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_START,FILE_SIZE).getLocatedBlocks();
// Pick two distinct random block indexes to delete.
while (removedIndex.size() != 2) {
int newRemoveIndex=rand.nextInt(lBlocks.size());
if (!removedIndex.contains(newRemoveIndex)) removedIndex.add(newRemoveIndex);
}
for ( Integer aRemovedIndex : removedIndex) {
blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + lBlocks.size());
}
final DataNode dn0=cluster.getDataNodes().get(DN_N0);
// Delete the chosen blocks' files from the datanode's storage directory,
// unfinalizing each block first so the dataset forgets it.
for ( ExtendedBlock b : blocks2Remove) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing the block " + b.getBlockName());
}
for ( File f : findAllFiles(dataDir,new MyFileFilter(b.getBlockName(),true))) {
DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
if (!f.delete()) {
LOG.warn("Couldn't delete " + b.getBlockName());
}
else {
LOG.debug("Deleted file " + f.toString());
}
}
}
// Give the datanode's directory scanner time to notice the deletions.
waitTil(DN_RESCAN_EXTRA_WAIT);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn0.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn0,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
// Force the namenode to recompute replication work before asserting.
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager());
printStats();
// With a single DN, each removed block is both missing and
// under-replicated, so the two counts must match.
assertEquals("Wrong number of MissingBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getMissingBlocksCount());
assertEquals("Wrong number of UnderReplicatedBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getUnderReplicatedBlocks());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * BlockRecovery_02.8.
 * Two replicas are in Finalized state.
 * Same-length finalized replicas must both be updated to the recovery
 * length; different-length finalized replicas must fail recovery.
 * @throws IOException in case of an error
 */
@Test public void testFinalizedReplicas() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
// Case 1: both finalized with equal length (differing gen stamps) —
// recovery must call updateReplicaUnderRecovery on both datanodes.
ReplicaRecoveryInfo replica1=new ReplicaRecoveryInfo(BLOCK_ID,REPLICA_LEN1,GEN_STAMP - 1,ReplicaState.FINALIZED);
ReplicaRecoveryInfo replica2=new ReplicaRecoveryInfo(BLOCK_ID,REPLICA_LEN1,GEN_STAMP - 2,ReplicaState.FINALIZED);
InterDatanodeProtocol dn1=mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2=mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1,replica2,dn1,dn2,REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block,RECOVERY_ID,REPLICA_LEN1);
verify(dn2).updateReplicaUnderRecovery(block,RECOVERY_ID,REPLICA_LEN1);
// Case 2: finalized replicas with different lengths — this is an
// inconsistency and recovery must throw.
replica1=new ReplicaRecoveryInfo(BLOCK_ID,REPLICA_LEN1,GEN_STAMP - 1,ReplicaState.FINALIZED);
replica2=new ReplicaRecoveryInfo(BLOCK_ID,REPLICA_LEN2,GEN_STAMP - 2,ReplicaState.FINALIZED);
try {
testSyncReplicas(replica1,replica2,dn1,dn2,REPLICA_LEN1);
Assert.fail("Two finalized replicas should not have different lengthes!");
}
catch ( IOException e) {
// The message prefix is part of the production error contract.
Assert.assertTrue(e.getMessage().startsWith("Inconsistent size of finalized replicas. "));
}
}
BooleanVerifier
/**
 * Test to verify the race between finalizeBlock and lease recovery.
 * A recovery thread grabs the dataset lock, sleeps, then initiates replica
 * recovery while the client is closing the stream; both sides must
 * complete without corrupting state.
 * @throws Exception
 */
@Test(timeout=20000) public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
// Tear down the class-level cluster; this test needs its own config with
// a short xceiver-stop timeout to provoke the race quickly.
tearDown();
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,"1000");
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitClusterUp();
DistributedFileSystem fs=cluster.getFileSystem();
Path path=new Path("/test");
FSDataOutputStream out=fs.create(path);
out.writeBytes("data");
out.hsync();
List blocks=DFSTestUtil.getAllBlocks(fs.open(path));
final LocatedBlock block=blocks.get(0);
final DataNode dataNode=cluster.getDataNodes().get(0);
final AtomicBoolean recoveryInitResult=new AtomicBoolean(true);
// Recovery thread: holds the dataset lock across a sleep so that the
// client's close() races against initReplicaRecovery.
Thread recoveryThread=new Thread(){
@Override public void run(){
try {
DatanodeInfo[] locations=block.getLocations();
final RecoveringBlock recoveringBlock=new RecoveringBlock(block.getBlock(),locations,block.getBlock().getGenerationStamp() + 1);
synchronized (dataNode.data) {
Thread.sleep(2000);
dataNode.initReplicaRecovery(recoveringBlock);
}
}
catch ( Exception e) {
// Any failure here is reported through the shared flag below.
recoveryInitResult.set(false);
}
}
}
;
recoveryThread.start();
try {
// close() may fail if recovery wins the race — that is acceptable,
// but only with the expected "are bad" pipeline error.
out.close();
}
catch ( IOException e) {
Assert.assertTrue("Writing should fail",e.getMessage().contains("are bad. Aborting..."));
}
finally {
recoveryThread.join();
}
Assert.assertTrue("Recovery should be initiated successfully",recoveryInitResult.get());
// Completing the recovery must succeed after the race resolves.
dataNode.updateReplicaUnderRecovery(block.getBlock(),block.getBlock().getGenerationStamp() + 1,block.getBlockSize());
}
finally {
if (null != cluster) {
cluster.shutdown();
cluster=null;
}
}
}
TestCleaner BranchVerifier BooleanVerifier HybridVerifier
/**
 * Cleans the resources and closes the instance of datanode.
 * Skips the work when teardown already ran or no datanode was started.
 * @throws IOException if an error occurred
 */
@After public void tearDown() throws IOException {
  // Guard clauses: nothing to do if already torn down or never started.
  if (tearDownDone) {
    return;
  }
  if (dn == null) {
    return;
  }
  try {
    dn.shutdown();
  }
  catch (Exception e) {
    LOG.error("Cannot close: ",e);
  }
  finally {
    // Always remove the datanode's data directory, even if shutdown threw.
    File dir = new File(DATA_DIR);
    if (dir.exists()) {
      Assert.assertTrue("Cannot delete data-node dirs",FileUtil.fullyDelete(dir));
    }
  }
  tearDownDone = true;
}
BooleanVerifier
/**
 * Verifies that {@code DataTransferThrottler} keeps the observed transfer
 * rate at or below the configured bandwidth: 6 MB is pushed through a
 * 1 MB/s throttler, so the elapsed time must be at least ~6 seconds.
 * @throws IOException declared for the test framework; not thrown here
 */
@Test public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L; // 1 MB/s
  final long TOTAL_BYTES = 6 * bandwidthPerSec; // 6 MB total
  long bytesToSend = TOTAL_BYTES;
  long start = Time.now();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long bytesSent = 1024 * 512L; // 0.5 MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75 MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    // Idle for a second mid-transfer; the throttler should account for it.
    Thread.sleep(1000);
  }
  catch (InterruptedException ignored) {
  }
  throttler.throttle(bytesToSend);
  long end = Time.now();
  // Bug fix: the original asserted on a `totalBytes` local that was always
  // 0, making the check vacuously true. Assert on the real byte count.
  assertTrue(TOTAL_BYTES * 1000 / (end - start) <= bandwidthPerSec);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end test of the block replacement (balancer move) protocol:
 * creates a single-block file on 3 racks, adds a 4th datanode, then
 * exercises invalid and valid replaceBlock requests and verifies the
 * resulting replica locations.
 * @throws Exception on cluster or RPC failure
 */
@Test public void testBlockReplacement() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final String[] INITIAL_RACKS = {"/RACK0","/RACK1","/RACK2"};
  final String[] NEW_RACKS = {"/RACK2"};
  final short REPLICATION_FACTOR = (short)3;
  final int DEFAULT_BLOCK_SIZE = 1024;
  final Random r = new Random();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
  CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,DEFAULT_BLOCK_SIZE / 2);
  CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path fileName = new Path("/tmp.txt");
    // One block, fully replicated across the three initial racks.
    DFSTestUtil.createFile(fs,fileName,DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,r.nextLong());
    DFSTestUtil.waitReplication(fs,fileName,REPLICATION_FACTOR);
    InetSocketAddress addr = new InetSocketAddress("localhost",cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr,CONF);
    List locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt",0,DEFAULT_BLOCK_SIZE).getLocatedBlocks();
    assertEquals(1,locatedBlocks.size());
    LocatedBlock block = (LocatedBlock)locatedBlocks.get(0);
    DatanodeInfo[] oldNodes = block.getLocations();
    // assertEquals takes (expected, actual) — original had them reversed.
    assertEquals(3,oldNodes.length);
    ExtendedBlock b = block.getBlock();
    // Start a 4th datanode on RACK2 and identify it by elimination.
    cluster.startDataNodes(CONF,1,true,null,NEW_RACKS);
    cluster.waitActive();
    DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);
    DatanodeInfo newNode = null;
    for (DatanodeInfo node : datanodes) {
      // Primitive boolean — no reason to box the flag.
      boolean isNewNode = true;
      for (DatanodeInfo oldNode : oldNodes) {
        if (node.equals(oldNode)) {
          isNewNode = false;
          break;
        }
      }
      if (isNewNode) {
        newNode = node;
        break;
      }
    }
    assertNotNull(newNode);
    // source: the old node sharing the new node's rack; proxies: the rest.
    DatanodeInfo source = null;
    ArrayList proxies = new ArrayList(2);
    for (DatanodeInfo node : datanodes) {
      // Reference comparison is safe: newNode came from this same array.
      if (node != newNode) {
        if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
          source = node;
        }
        else {
          proxies.add(node);
        }
      }
    }
    assertTrue(source != null && proxies.size() == 2);
    LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block "+ b);
    assertFalse(replaceBlock(b,source,newNode,(DatanodeInfo)proxies.get(0)));
    LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block "+ b);
    assertFalse(replaceBlock(b,source,(DatanodeInfo)proxies.get(0),(DatanodeInfo)proxies.get(1)));
    LOG.info("Testcase 3: Source=" + source + " Proxy="+ proxies.get(0)+ " Destination="+ newNode);
    assertTrue(replaceBlock(b,source,(DatanodeInfo)proxies.get(0),newNode));
    checkBlocks(new DatanodeInfo[]{newNode,(DatanodeInfo)proxies.get(0),(DatanodeInfo)proxies.get(1)},fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
    assertTrue(replaceBlock(b,(DatanodeInfo)proxies.get(0),(DatanodeInfo)proxies.get(1),source));
    checkBlocks((DatanodeInfo[])proxies.toArray(new DatanodeInfo[proxies.size()]),fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
* Test BPService Thread Exit
*/
@Test public void testBPServiceExit() throws Exception {
DataNode dn=cluster.getDataNodes().get(0);
stopBPServiceThreads(1,dn);
assertTrue("DataNode should not exit",dn.isDatanodeUp());
stopBPServiceThreads(2,dn);
assertFalse("DataNode should exit",dn.isDatanodeUp());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured: writes slowly through a 2-node pipeline and then checks the
 * PacketAckRoundTripTimeNanos counters/quantiles on the pipeline head.
 */
@Test public void testRoundTripAckMetric() throws Exception {
final int datanodeCount=2;
final int interval=1;
Configuration conf=new HdfsConfiguration();
// Enable 1-second percentile windows so quantile gauges are produced.
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path testFile=new Path("/testRoundTripAckMetric.txt");
FSDataOutputStream fsout=fs.create(testFile,(short)datanodeCount);
DFSOutputStream dout=(DFSOutputStream)fsout.getWrappedStream();
// Small packets plus an artificial slowdown keep the pipeline open long
// enough for acks to be observed.
dout.setChunksPerPacket(5);
dout.setArtificialSlowdown(3000);
fsout.write(new byte[10000]);
DatanodeInfo[] pipeline=null;
int count=0;
// Poll up to 5 seconds for the write pipeline to be established.
// NOTE(review): if all 5 attempts return null, pipeline[0] below throws
// NPE rather than a descriptive failure — consider an assertNotNull.
while (pipeline == null && count < 5) {
pipeline=dout.getPipeline();
System.out.println("Waiting for pipeline to be created.");
Thread.sleep(1000);
count++;
}
DatanodeInfo headInfo=pipeline[0];
DataNode headNode=null;
// Map the pipeline head's DatanodeInfo back to the live DataNode object.
for ( DataNode datanode : cluster.getDataNodes()) {
if (datanode.getDatanodeId().equals(headInfo)) {
headNode=datanode;
break;
}
}
assertNotNull("Could not find the head of the datanode write pipeline",headNode);
// Wait one full percentile interval so quantile snapshots are published.
Thread.sleep((interval + 1) * 1000);
MetricsRecordBuilder dnMetrics=getMetrics(headNode.getMetrics().name());
assertTrue("Expected non-zero number of acks",getLongCounter("PacketAckRoundTripTimeNanosNumOps",dnMetrics) > 0);
assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",dnMetrics);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a datanode shuts itself down when its only nameservice is
 * reformatted with a different cluster id: after the namenodes restart
 * under "cluster-2", the datanode's registration must fail and the node
 * must exit.
 */
@Test public void testDNWithInvalidStorageWithHA() throws Exception {
// Two NNs in one nameservice, both initially under cluster id "cluster-1".
MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1")).addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
top.setFederation(true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build();
try {
cluster.startDataNodes(conf,1,true,null,null);
// Allow time for the datanode to register with both namenodes.
Thread.sleep(10000);
DataNode dn=cluster.getDataNodes().get(0);
assertTrue("Datanode should be running",dn.isDatanodeUp());
assertEquals("BPOfferService should be running",1,dn.getAllBpOs().length);
DataNodeProperties dnProp=cluster.stopDataNode(0);
// Stop both NNs, reformat under a new cluster id, and copy the name
// dirs so both NNs come back with the mismatching "cluster-2" id.
cluster.getNameNode(0).stop();
cluster.getNameNode(1).stop();
Configuration nn1=cluster.getConfiguration(0);
Configuration nn2=cluster.getConfiguration(1);
StartupOption.FORMAT.setClusterId("cluster-2");
DFSTestUtil.formatNameNode(nn1);
MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),FSNamesystem.getNamespaceDirs(nn2),nn2);
cluster.restartNameNode(0,false);
cluster.restartNameNode(1,false);
cluster.restartDataNode(dnProp);
// Give the datanode time to attempt registration and fail.
Thread.sleep(10000);
dn=cluster.getDataNodes().get(0);
// The datanode's storage still carries "cluster-1", so its only
// service fails and the whole datanode must shut down.
assertFalse("Datanode should have shutdown as only service failed",dn.isDatanodeUp());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Starts a single NN and single DN and verifies registration and handshake:
 * the datanode's block-pool service must report the namenode's address,
 * block pool id and cluster id, and all BP services must stop on shutdown.
 * @throws IOException
 */
@Test public void testFedSingleNN() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
  try {
    NameNode nn1 = cluster.getNameNode();
    assertNotNull("cannot create nn1",nn1);
    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
    DataNode dn = cluster.getDataNodes().get(0);
    final Map volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
    int i = 0;
    for (Object entry : volInfos.entrySet()) {
      Map.Entry e = (Map.Entry)entry;
      LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
    }
    assertEquals("number of volumes is wrong",2,volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      // Bug fix: the original log line concatenated "reg: bpid=" directly
      // with "; name=", silently dropping the block-pool id value.
      LOG.info("reg: bpid=" + bpos.getBlockPoolId() + "; name="+ bpos.bpRegistration+ "; sid="+ bpos.bpRegistration.getDatanodeUuid()+ "; nna="+ getNNSocketAddress(bpos));
    }
    BPOfferService bpos1 = dn.getAllBpOs()[0];
    bpos1.triggerBlockReportForTests();
    // assertEquals takes (message, expected, actual) — the original passed
    // actual before expected, producing misleading failure messages.
    assertEquals("wrong nn address",nn1.getNameNodeAddress(),getNNSocketAddress(bpos1));
    assertEquals("wrong bpid",bpid1,bpos1.getBlockPoolId());
    assertEquals("wrong cid",cid1,dn.getClusterId());
    cluster.shutdown();
    // After shutdown every block-pool service must be gone.
    assertEquals(0,dn.getAllBpOs().length);
    cluster = null;
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that NameNodes can be added to a running MiniDFSCluster:
 * (1) a federated 2-NN cluster accepts a third NN,
 * (2) a federated 1-NN cluster accepts a second NN,
 * (3) a non-federated cluster rejects the addition with an IOException.
 * @throws IOException on unexpected cluster failure
 */
@Test public void testMiniDFSClusterWithMultipleNN() throws IOException {
Configuration conf=new HdfsConfiguration();
// Scenario 1: federated cluster with two NNs; adding one more must work.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
Assert.assertEquals("(1)Should be 2 namenodes",2,cluster.getNumNameNodes());
cluster.addNameNode(conf,0);
Assert.assertEquals("(1)Should be 3 namenodes",3,cluster.getNumNameNodes());
}
catch ( IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
}
finally {
cluster.shutdown();
}
// Scenario 2: federated cluster with a single NN; adding one more must work.
conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1)).build();
try {
Assert.assertNotNull(cluster);
cluster.waitActive();
Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes());
cluster.addNameNode(conf,0);
Assert.assertEquals("(2)Should be 2 namenodes",2,cluster.getNumNameNodes());
}
catch ( IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
}
finally {
cluster.shutdown();
}
// Scenario 3: non-federated cluster; adding a NN must be rejected.
conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
Assert.assertNotNull(cluster);
// Fixed label: this is the third scenario, not the second.
Assert.assertEquals("(3)Should be 1 namenodes",1,cluster.getNumNameNodes());
cluster.addNameNode(conf,9929);
Assert.fail("shouldn't be able to add another NN to non federated cluster");
}
catch ( IOException e) {
// Expected: MiniDFSCluster refuses to add a NN to a non-federated cluster.
Assert.assertTrue(e.getMessage().startsWith("cannot add namenode"));
Assert.assertEquals("(3)Should be 1 namenodes",1,cluster.getNumNameNodes());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts a federated topology in which the second nameservice uses a
 * mismatched ("bad-cid") cluster id, then verifies that a DN connecting to
 * both stays alive but only keeps the BPOfferService of the matching
 * nameservice.
 */
@Test(timeout=20000) public void testClusterIdMismatchAtStartupWithHA() throws Exception {
MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0")).addNN(new MiniDFSNNTopology.NNConf("nn1"))).addNameservice(new MiniDFSNNTopology.NSConf("ns2").addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid")).addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
top.setFederation(true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build();
try {
cluster.startDataNodes(conf,1,true,null,null);
// Give the DN time to attempt registration with both nameservices.
Thread.sleep(10000);
DataNode dn=cluster.getDataNodes().get(0);
assertTrue("Datanode should be running",dn.isDatanodeUp());
// Only ns1 matches the DN's cluster id, so exactly one BPOS survives.
assertEquals("Only one BPOfferService should be running",1,dn.getAllBpOs().length);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Starts two federated NNs and a single DN, and verifies that the DN
 * creates one BPOfferService per block pool, each registered against the
 * correct NN address, bpid, cluster id and namespace id.
 * @throws IOException on cluster start or query failure
 */
@Test public void test2NNRegistration() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
assertNotNull("cannot create nn1",nn1);
assertNotNull("cannot create nn2",nn2);
// Collect each NN's on-disk identity for later comparison with the DN state.
String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String bpid2=FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID();
String cid2=FSImageTestUtil.getFSImage(nn2).getClusterID();
int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
int lv2=FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
int ns1=FSImageTestUtil.getFSImage(nn1).getNamespaceID();
int ns2=FSImageTestUtil.getFSImage(nn2).getNamespaceID();
// Federated NNs share a cluster id but must have distinct namespace ids.
assertNotSame("namespace ids should be different",ns1,ns2);
LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
LOG.info("nn2: lv=" + lv2 + ";cid="+ cid2+ ";bpid="+ bpid2+ ";uri="+ nn2.getNameNodeAddress());
DataNode dn=cluster.getDataNodes().get(0);
final Map volInfos=dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
int i=0;
for ( Map.Entry e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
}
// MiniDFSCluster configures two storage directories per DN by default.
assertEquals("number of volumes is wrong",2,volInfos.size());
for ( BPOfferService bpos : dn.getAllBpOs()) {
LOG.info("BP: " + bpos);
}
BPOfferService bpos1=dn.getAllBpOs()[0];
BPOfferService bpos2=dn.getAllBpOs()[1];
// The array order is not deterministic; swap so bpos1 always pairs with nn1.
if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
BPOfferService tmp=bpos1;
bpos1=bpos2;
bpos2=tmp;
}
assertEquals("wrong nn address",getNNSocketAddress(bpos1),nn1.getNameNodeAddress());
assertEquals("wrong nn address",getNNSocketAddress(bpos2),nn2.getNameNodeAddress());
assertEquals("wrong bpid",bpos1.getBlockPoolId(),bpid1);
assertEquals("wrong bpid",bpos2.getBlockPoolId(),bpid2);
assertEquals("wrong cid",dn.getClusterId(),cid1);
assertEquals("cid should be same",cid2,cid1);
// Each BPOS must have handshaken into its own NN's namespace.
assertEquals("namespace should be same",bpos1.bpNSInfo.namespaceID,ns1);
assertEquals("namespace should be same",bpos2.bpNSInfo.namespaceID,ns2);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Writes three files through three independent clients and verifies that
 * the DataXceiverServer's peer map and peer-xceiver map stay in sync,
 * both while streams are open and after they are closed.
 */
@Test(timeout=600000) public void testDatanodePeersXceiver() throws Exception {
try {
startCluster();
String testFile1="/TestDataNodeXceiver1.dat";
String testFile2="/TestDataNodeXceiver2.dat";
String testFile3="/TestDataNodeXceiver3.dat";
DFSClient client1=new DFSClient(NameNode.getAddress(conf),conf);
DFSClient client2=new DFSClient(NameNode.getAddress(conf),conf);
DFSClient client3=new DFSClient(NameNode.getAddress(conf),conf);
DFSOutputStream s1=(DFSOutputStream)client1.create(testFile1,true);
DFSOutputStream s2=(DFSOutputStream)client2.create(testFile2,true);
DFSOutputStream s3=(DFSOutputStream)client3.create(testFile3,true);
byte[] toWrite=new byte[1024 * 1024 * 8];
Random rb=new Random(1111);
rb.nextBytes(toWrite);
s1.write(toWrite,0,1024 * 1024 * 8);
s1.flush();
s2.write(toWrite,0,1024 * 1024 * 8);
s2.flush();
s3.write(toWrite,0,1024 * 1024 * 8);
s3.flush();
// Fixed: the original compared getNumPeersXceiver() with itself, which is
// a tautology. The intent is that the peer map and the peer->xceiver map
// track the same set of connections.
assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer().getNumPeersXceiver());
s1.close();
s2.close();
s3.close();
// The two maps must also agree after the streams are closed.
assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer().getNumPeersXceiver());
client1.close();
client2.close();
client3.close();
}
finally {
shutdownCluster();
}
}
InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that individual volume failures do not cause DNs to fail, that
 * all volumes failed on a single datanode do cause it to fail, and
 * that the capacities and liveliness is adjusted correctly in the NN.
 */
@Test public void testSuccessiveVolumeFailures() throws Exception {
// Volume failure is simulated by chmod, which is unreliable on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
Thread.sleep(WAIT_FOR_HEARTBEATS);
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
final long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
// DN i uses directories data(2i+1) and data(2i+2) under dataDir.
File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
File dn3Vol1=new File(dataDir,"data" + (2 * 2 + 1));
File dn3Vol2=new File(dataDir,"data" + (2 * 2 + 2));
// Fail one volume each on DN1 and DN2 by making them non-executable.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
Path file1=new Path("/test1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file1,(short)3);
// A single failed volume must not take any DN down.
ArrayList dns=cluster.getDataNodes();
assertTrue("DN1 should be up",dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up",dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(0).getMetrics().name()));
assertCounter("VolumeFailures",1L,getMetrics(dns.get(1).getMetrics().name()));
assertCounter("VolumeFailures",0L,getMetrics(dns.get(2).getMetrics().name()));
// Sanity check on the timeouts used below.
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// NN should see 3 live DNs, 2 volume failures, and reduced capacity.
DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Now fail one of DN3's volumes as well.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,false));
Path file2=new Path("/test2");
DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file2,(short)3);
assertTrue("DN3 should still be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(2).getMetrics().name()));
ArrayList live=new ArrayList();
ArrayList dead=new ArrayList();
dm.fetchDatanodes(live,dead,false);
live.clear();
dead.clear();
dm.fetchDatanodes(live,dead,false);
assertEquals("DN3 should have 1 failed volume",1,live.get(2).getVolumeFailures());
dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,3,origCapacity - (3 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Fail DN3's last volume: with all volumes gone the DN itself must die.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,false));
Path file3=new Path("/test3");
DFSTestUtil.createFile(fs,file3,1024,(short)3,1L);
// Only 2 DNs remain usable, so replication can reach at most 2.
DFSTestUtil.waitReplication(fs,file3,(short)2);
DFSTestUtil.waitForDatanodeDeath(dns.get(2));
assertCounter("VolumeFailures",2L,getMetrics(dns.get(2).getMetrics().name()));
DFSTestUtil.waitForDatanodeStatus(dm,2,1,2,origCapacity - (4 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Restore all volumes and restart: the cluster must fully recover.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,true));
cluster.restartDataNodes();
cluster.waitActive();
Path file4=new Path("/test4");
DFSTestUtil.createFile(fs,file4,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file4,(short)3);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,0,origCapacity,WAIT_FOR_HEARTBEATS);
}
BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test that the NN re-learns of volume failures after restart.
 */
@Test public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
// Volume failure is simulated by chmod, which is unreliable on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
// Fail one volume each on DN1 and DN2 (dirs data1 and data3).
File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
Path file1=new Path("/test1");
DFSTestUtil.createFile(fs,file1,1024,(short)2,1L);
DFSTestUtil.waitReplication(fs,file1,(short)2);
// NN sees 3 live DNs, 2 failed volumes and the reduced capacity...
DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
cluster.restartNameNode(0);
cluster.waitActive();
// ...and must re-learn the identical stats from heartbeats after restart.
DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up.
 */
@Test public void testValidVolumesAtStartup() throws Exception {
// Directory permission manipulation below is unreliable on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.shutdownDataNodes();
// Allow the DN to come up with one unusable volume.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,1);
File tld=new File(MiniDFSCluster.getBaseDirectory(),"badData");
File dataDir1=new File(tld,"data1");
File dataDir1Actual=new File(dataDir1,"1");
dataDir1Actual.mkdirs();
// dataDir2 is made inaccessible so its child volume fails at startup.
File dataDir2=new File(tld,"data2");
prepareDirToFail(dataDir2);
File dataDir2Actual=new File(dataDir2,"2");
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
cluster.startDataNodes(conf,1,false,null,null);
cluster.waitActive();
try {
assertTrue("The DN should have started up fine.",cluster.isDataNodeUp());
DataNode dn=cluster.getDataNodes().get(0);
String si=DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
// The good volume is in use; the failed one must not appear.
assertTrue("The DN should have started with this directory",si.contains(dataDir1Actual.getPath()));
assertFalse("The DN shouldn't have a bad directory.",si.contains(dataDir2Actual.getPath()));
}
finally {
cluster.shutdownDataNodes();
// Restore permissions so later cleanup can delete the directory.
FileUtil.chmod(dataDir2.toString(),"755");
}
}
BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN shuts itself down when the number of failures
 * experienced drops below the tolerated amount.
 */
@Test public void testConfigureMinValidVolumes() throws Exception {
// Volume failure is simulated by chmod, which is unreliable on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Tolerate zero failures: a single failed volume must kill the DN.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,0);
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
// Fail DN2's first volume (directory data3).
File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
Path file1=new Path("/test1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
// With DN2 down only two replicas are attainable.
DFSTestUtil.waitReplication(fs,file1,(short)2);
// NN must report 2 live / 1 dead and the reduced capacity.
DFSTestUtil.waitForDatanodeStatus(dm,2,1,0,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Restoring the volume alone does not bring the DN back automatically;
// new writes still succeed on the remaining DNs.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
Path file2=new Path("/test2");
DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file2,(short)2);
}
TestInitializer BooleanVerifier HybridVerifier
/**
 * Initializes a fresh DataStorage and NamespaceInfo and recreates an
 * empty test directory before each test case.
 * @throws IOException if the test directory cannot be prepared
 */
@Before public void setUp() throws IOException {
storage=new DataStorage();
nsInfo=new NamespaceInfo(0,CLUSTER_ID,DEFAULT_BPID,CTIME,BUILD_VERSION,SOFTWARE_VERSION);
// Remove any leftovers from a previous run before recreating the dir.
FileUtil.fullyDelete(TEST_DIR);
assertTrue("Failed to make test dir.",TEST_DIR.mkdirs());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies dfsadmin -deleteBlockPool on a federated cluster: deleting a
 * block pool the DN still considers alive must fail without "force", must
 * succeed with "force", and must leave the other block pool intact.
 */
@Test public void testDfsAdminDeleteBlockPool() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2");
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs1=cluster.getFileSystem(0);
FileSystem fs2=cluster.getFileSystem(1);
// Put some data in both block pools so their directories are non-trivial.
DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)1,54);
DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)1,54);
DataNode dn1=cluster.getDataNodes().get(0);
String bpid1=cluster.getNamesystem(0).getBlockPoolId();
String bpid2=cluster.getNamesystem(1).getBlockPoolId();
File dn1StorageDir1=cluster.getInstanceStorageDir(0,0);
File dn1StorageDir2=cluster.getInstanceStorageDir(0,1);
// Drop nameservice 2 from the DN's view so bpid2 becomes deletable.
Configuration nn1Conf=cluster.getConfiguration(0);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1,dn1.getAllBpOs().length);
DFSAdmin admin=new DFSAdmin(nn1Conf);
String dn1Address=dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
// Without "force" the non-empty block pool must not be deleted.
String[] args={"-deleteBlockPool",dn1Address,bpid2};
int ret=admin.run(args);
assertFalse(0 == ret);
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2);
// With "force" the deletion must succeed and remove bpid2 everywhere.
String[] forceArgs={"-deleteBlockPool",dn1Address,bpid2,"force"};
ret=admin.run(forceArgs);
assertEquals(0,ret);
verifyBlockPoolDirectories(false,dn1StorageDir1,bpid2);
verifyBlockPoolDirectories(false,dn1StorageDir2,bpid2);
// The remaining block pool must be untouched.
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test public void testShutdown() throws Exception {
// Volume failure is simulated via read-only dirs, which is unreliable on
// Windows. Use assumeTrue (consistent with the other volume-failure tests)
// so the test is reported as skipped rather than silently passing.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
final int dnIndex=0;
String bpid=cluster.getNamesystem().getBlockPoolId();
// Both RBW directories of the first DN are made read-only, failing all
// of its volumes on the next write attempt.
File storageDir=cluster.getInstanceStorageDir(dnIndex,0);
File dir1=MiniDFSCluster.getRbwDir(storageDir,bpid);
storageDir=cluster.getInstanceStorageDir(dnIndex,1);
File dir2=MiniDFSCluster.getRbwDir(storageDir,bpid);
try {
assertTrue("Couldn't chmod local vol",dir1.setReadOnly());
assertTrue("Couldn't chmod local vol",dir2.setReadOnly());
DataNode dn=cluster.getDataNodes().get(dnIndex);
// Keep writing until the DN notices its volumes are gone and shuts down.
for (int i=0; dn.isDatanodeUp(); i++) {
Path fileName=new Path("/test.txt" + i);
DFSTestUtil.createFile(fs,fileName,1024,(short)2,1L);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
fs.delete(fileName,true);
}
}
finally {
// Restore permissions so later tests and cleanup can use the dirs.
FileUtil.setWritable(dir1,true);
FileUtil.setWritable(dir2,true);
}
}
InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Checks whether {@link DataNode#checkDiskErrorAsync()} is being called or not.
 * Before refactoring the code the above function was not getting called
 * @throws IOException, InterruptedException
 */
@Test public void testcheckDiskError() throws IOException, InterruptedException {
// Ensure at least one DN exists to run the disk check against.
if (cluster.getDataNodes().size() <= 0) {
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
}
DataNode dataNode=cluster.getDataNodes().get(0);
// Slack absorbs scheduling jitter between the async check and this thread.
long slackTime=dataNode.checkDiskErrorInterval / 2;
dataNode.checkDiskErrorAsync();
// Wait one full interval so the async check has had a chance to run.
Thread.sleep(dataNode.checkDiskErrorInterval);
long lastDiskErrorCheck=dataNode.getLastDiskErrorCheck();
assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms",((Time.monotonicNow() - lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
}
BooleanVerifier
/**
 * Caches files until the configured max locked memory is exhausted and
 * verifies that the overflowing cache request fails (logged and counted as
 * a failed cache attempt), then uncaches everything and checks accounting.
 */
@Test(timeout=600000) public void testFilesExceedMaxLockedMemory() throws Exception {
LOG.info("beginning testFilesExceedMaxLockedMemory");
// numFiles - 1 files fill the cache exactly; the last one must not fit.
final int numFiles=5;
final long fileSize=CACHE_CAPACITY / (numFiles - 1);
final Path[] testFiles=new Path[numFiles];
final HdfsBlockLocation[][] fileLocs=new HdfsBlockLocation[numFiles][];
final long[] fileSizes=new long[numFiles];
for (int i=0; i < numFiles; i++) {
testFiles[i]=new Path("/testFilesExceedMaxLockedMemory-" + i);
// Fixed literal suffix: uppercase L instead of lowercase l, which is
// easily misread as the digit 1. Same value (0xDFA as a long seed).
DFSTestUtil.createFile(fs,testFiles[i],fileSize,(short)1,0xDFAL);
fileLocs[i]=(HdfsBlockLocation[])fs.getFileBlockLocations(testFiles[i],0,fileSize);
long[] sizes=getBlockSizes(fileLocs[i]);
for (int j=0; j < sizes.length; j++) {
fileSizes[i]+=sizes[j];
}
}
long total=0;
DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd);
// Cache the files that fit; usage grows by the page-rounded file size.
for (int i=0; i < numFiles - 1; i++) {
setHeartbeatResponse(cacheBlocks(fileLocs[i]));
total=DFSTestUtil.verifyExpectedCacheUsage(rounder.round(total + fileSizes[i]),4 * (i + 1),fsd);
}
// The final file overflows the cache: expect a "more bytes" log message.
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
setHeartbeatResponse(cacheBlocks(fileLocs[numFiles - 1]));
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
int lines=appender.countLinesWithMessage("more bytes in the cache: " + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
return lines > 0;
}
}
,500,30000);
assertTrue("Expected more than 0 failed cache attempts",fsd.getNumBlocksFailedToCache() > 0);
// Uncache everything and verify usage returns to zero step by step.
int curCachedBlocks=16;
for (int i=0; i < numFiles - 1; i++) {
setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
long uncachedBytes=rounder.round(fileSizes[i]);
total-=uncachedBytes;
curCachedBlocks-=uncachedBytes / BLOCK_SIZE;
DFSTestUtil.verifyExpectedCacheUsage(total,curCachedBlocks,fsd);
}
LOG.info("finishing testFilesExceedMaxLockedMemory");
}
BooleanVerifier
/**
 * Writes a file whose blocks are smaller than the OS page size, caches it,
 * and verifies that cache usage is accounted in whole pages per block.
 */
@Test(timeout=60000) public void testPageRounder() throws Exception {
final Path testPath=new Path("/testPageRounder");
final int blockSize=512;
// The whole point of this test is sub-page blocks; guard the precondition.
assertTrue("Page size should be greater than smallBlocks!",PAGE_SIZE > blockSize);
final int blockCount=5;
final int totalLen=blockSize * blockCount;
// Create a small-block file: overwrite=false, bufferSize=4096, repl=1.
FSDataOutputStream stream=fs.create(testPath,false,4096,(short)1,blockSize);
stream.write(new byte[totalLen]);
stream.close();
HdfsBlockLocation[] blockLocs=(HdfsBlockLocation[])fs.getFileBlockLocations(testPath,0,totalLen);
// Caching must round each block's usage up to a full page.
setHeartbeatResponse(cacheBlocks(blockLocs));
DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * blockCount,blockCount,fsd);
// Uncaching must release everything.
setHeartbeatResponse(uncacheBlocks(blockLocs));
DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd);
}
BooleanVerifier
/**
 * Add a received block entry and then replace it. Ensure that a single
 * IBR is generated and that pending receive request state is cleared.
 * This test case verifies the failure in HDFS-5922.
 * @throws InterruptedException
 * @throws IOException
 */
@Test(timeout=60000) public void testReplaceReceivedBlock() throws InterruptedException, IOException {
try {
DatanodeProtocolClientSideTranslatorPB nnSpy=spyOnDnCallsToNn();
// Inject the same received-block notification twice; the second must
// replace (not duplicate) the first pending entry.
injectBlockReceived();
injectBlockReceived();
// Allow time for the incremental block report (IBR) to be sent.
Thread.sleep(2000);
Mockito.verify(nnSpy,atLeastOnce()).blockReceivedAndDeleted(any(DatanodeRegistration.class),anyString(),any(StorageReceivedDeletedBlocks[].class));
// After the IBR goes out, no pending receive state may remain.
assertFalse(actor.hasPendingIBR());
}
finally {
cluster.shutdown();
cluster=null;
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts a federated cluster with one NN and incrementally adds three more,
 * verifying after each addition that the DN spins up a matching
 * BPOfferService, and finally that the DN's set of NN addresses exactly
 * equals the cluster's.
 */
@Test public void testRefreshNamenodes() throws IOException {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new NSConf("ns1").addNN(new NNConf(null).setIpcPort(nnPort1))).setFederation(true);
cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).build();
DataNode dn=cluster.getDataNodes().get(0);
assertEquals(1,dn.getAllBpOs().length);
// Each added NN must trigger the DN to create one more BPOfferService.
cluster.addNameNode(conf,nnPort2);
assertEquals(2,dn.getAllBpOs().length);
cluster.addNameNode(conf,nnPort3);
assertEquals(3,dn.getAllBpOs().length);
cluster.addNameNode(conf,nnPort4);
// Collect the NN addresses as the cluster knows them (add() returning
// true also asserts they are all distinct).
Set nnAddrsFromCluster=Sets.newHashSet();
for (int i=0; i < 4; i++) {
assertTrue(nnAddrsFromCluster.add(cluster.getNameNode(i).getNameNodeAddress()));
}
// Collect the NN addresses as the DN's service actors know them.
Set nnAddrsFromDN=Sets.newHashSet();
for ( BPOfferService bpos : dn.getAllBpOs()) {
for ( BPServiceActor bpsa : bpos.getBPServiceActors()) {
assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
}
}
// The two views must be identical: empty symmetric difference.
assertEquals("",Joiner.on(",").join(Sets.symmetricDifference(nnAddrsFromCluster,nnAddrsFromDN)));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies FsDatasetSpi.Factory selection: the default configuration yields
 * the real (non-simulated) FsDatasetFactory, and after
 * SimulatedFSDataset.setFactory the simulated factory is returned.
 */
@Test public void testFSDatasetFactory(){
final Configuration conf=new Configuration();
// Restored wildcard generics: the original text read "FsDatasetSpi.Factory>",
// which is not valid Java (the type parameter was lost).
FsDatasetSpi.Factory<?> f=FsDatasetSpi.Factory.getFactory(conf);
assertEquals(FsDatasetFactory.class,f.getClass());
assertFalse(f.isSimulated());
// Switch the config to the simulated dataset and re-resolve the factory.
SimulatedFSDataset.setFactory(conf);
FsDatasetSpi.Factory<?> s=FsDatasetSpi.Factory.getFactory(conf);
assertEquals(SimulatedFSDataset.Factory.class,s.getClass());
assertTrue(s.isSimulated());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Populates a simulated dataset and verifies that every injected block is
 * valid and reports the id-derived length, in both metadata and data.
 */
@Test public void testWriteRead() throws IOException {
final SimulatedFSDataset dataset=getSimulatedFSDataset();
addSomeBlocks(dataset);
for (int blockId=1; blockId <= NUMBLOCKS; ++blockId) {
final ExtendedBlock block=new ExtendedBlock(bpid,blockId,0,0);
final long expectedLen=blockIdToLen(blockId);
// Each added block must be known to the dataset...
assertTrue(dataset.isValidBlock(block));
// ...and its recorded and actual sizes must match the id-derived length.
assertEquals(expectedLen,dataset.getLength(block));
checkBlockDataAndSize(dataset,block,expectedLen);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Deletes two blocks from a populated simulated dataset and verifies that
 * they become invalid, that used/remaining space is adjusted by exactly the
 * deleted bytes, and that all other blocks stay valid.
 */
@Test public void testInvalidate() throws IOException {
final SimulatedFSDataset dataset=getSimulatedFSDataset();
final int totalBytes=addSomeBlocks(dataset);
// Invalidate blocks 1 and 2.
final Block[] toDelete={new Block(1,0,0),new Block(2,0,0)};
dataset.invalidate(bpid,toDelete);
checkInvalidBlock(new ExtendedBlock(bpid,toDelete[0]));
checkInvalidBlock(new ExtendedBlock(bpid,toDelete[1]));
// Space accounting must reflect exactly the removed bytes.
final long deletedBytes=blockIdToLen(1) + blockIdToLen(2);
assertEquals(totalBytes - deletedBytes,dataset.getDfsUsed());
assertEquals(dataset.getCapacity() - totalBytes + deletedBytes,dataset.getRemaining());
// Every block that was not deleted must remain valid.
for (int blockId=3; blockId <= NUMBLOCKS; ++blockId) {
assertTrue(dataset.isValidBlock(new ExtendedBlock(bpid,new Block(blockId,0,0))));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies metadata stream access: requesting metadata for a block that was
 * never added must fail with an IOException, and for an existing block the
 * stream must start with a valid header carrying a zero-size NULL checksum.
 */
@Test public void testGetMetaData() throws IOException {
final SimulatedFSDataset fsdataset=getSimulatedFSDataset();
ExtendedBlock b=new ExtendedBlock(bpid,1,5,0);
try {
assertTrue(fsdataset.getMetaDataInputStream(b) == null);
// Replaced assertTrue("...",false) with the idiomatic fail(): reached
// only if getMetaDataInputStream did not throw for the unknown block.
fail("Expected an IO exception");
}
catch ( IOException e) {
// expected: block 1 has not been added to the dataset yet
}
addSomeBlocks(fsdataset);
b=new ExtendedBlock(bpid,1,0,0);
InputStream metaInput=fsdataset.getMetaDataInputStream(b);
DataInputStream metaDataInput=new DataInputStream(metaInput);
short version=metaDataInput.readShort();
assertEquals(BlockMetadataHeader.VERSION,version);
// Simulated datasets write a NULL checksum with zero bytes per checksum.
DataChecksum checksum=DataChecksum.newDataChecksum(metaDataInput);
assertEquals(DataChecksum.Type.NULL,checksum.getChecksumType());
assertEquals(0,checksum.getChecksumSize());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies block injection into a non-empty simulated dataset: injected
 * blocks must coexist with previously added ones, space accounting must
 * cover both sets, and injecting into a dataset that is too small must fail.
 */
@Test public void testInjectionNonEmpty() throws IOException {
SimulatedFSDataset fsdataset=getSimulatedFSDataset();
BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid);
assertEquals(0,blockReport.getNumberOfBlocks());
int bytesAdded=addSomeBlocks(fsdataset);
blockReport=fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
for ( Block b : blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes());
}
fsdataset=null;
// Second dataset pre-filled with non-conflicting block ids (NUMBLOCKS+1..).
SimulatedFSDataset sfsdataset=getSimulatedFSDataset();
bytesAdded+=addSomeBlocks(sfsdataset,NUMBLOCKS + 1);
// NOTE(review): these two getBlockReport results are discarded and the
// assertions re-check the first dataset's (stale) report -- confirm whether
// "blockReport=sfsdataset.getBlockReport(bpid)" was intended here.
sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
// Inject the first dataset's blocks; the union must be visible afterwards.
sfsdataset.injectBlocks(bpid,blockReport);
blockReport=sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS * 2,blockReport.getNumberOfBlocks());
for ( Block b : blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes());
assertEquals(blockIdToLen(b.getBlockId()),sfsdataset.getLength(new ExtendedBlock(bpid,b)));
}
assertEquals(bytesAdded,sfsdataset.getDfsUsed());
assertEquals(sfsdataset.getCapacity() - bytesAdded,sfsdataset.getRemaining());
// A dataset with tiny capacity must reject the injection.
conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,10);
try {
sfsdataset=getSimulatedFSDataset();
sfsdataset.addBlockPool(bpid,conf);
sfsdataset.injectBlocks(bpid,blockReport);
// Replaced assertTrue("...",false) with the idiomatic fail().
fail("Expected an IO exception");
}
catch ( IOException e) {
// expected: injected blocks exceed the configured capacity
}
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Starts a Kerberos-secured NN and DN using principals/keytabs supplied via
 * system properties and verifies the DataNode comes up. Requires an external
 * KDC; the required properties must be set or the test fails fast.
 */
@Test public void testSecureNameNode() throws Exception {
MiniDFSCluster cluster=null;
try {
String nnPrincipal=System.getProperty("dfs.namenode.kerberos.principal");
String nnSpnegoPrincipal=System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
String nnKeyTab=System.getProperty("dfs.namenode.keytab.file");
assertNotNull("NameNode principal was not specified",nnPrincipal);
assertNotNull("NameNode SPNEGO principal was not specified",nnSpnegoPrincipal);
assertNotNull("NameNode keytab was not specified",nnKeyTab);
String dnPrincipal=System.getProperty("dfs.datanode.kerberos.principal");
String dnKeyTab=System.getProperty("dfs.datanode.keytab.file");
assertNotNull("DataNode principal was not specified",dnPrincipal);
assertNotNull("DataNode keytab was not specified",dnKeyTab);
Configuration conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,nnPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,nnSpnegoPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,nnKeyTab);
conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,dnPrincipal);
conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,dnKeyTab);
// Secure DNs must bind privileged ports.
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,"127.0.0.1:1004");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,"127.0.0.1:1006");
// Fixed key: "700" is a permission mask, not a data directory path. The
// original set DFS_DATANODE_DATA_DIR_KEY, which would make the DN try to
// use a directory literally named "700". Secure DNs require 700 perms on
// their data dirs, which is what this key configures.
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,"700");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).checkDataNodeAddrConfig(true).build();
cluster.waitActive();
assertTrue(cluster.isDataNodeUp());
}
catch ( Exception ex) {
// Log before rethrowing so the failure cause is visible in test output.
ex.printStackTrace();
throw ex;
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies transfer of a replica-being-written (RBW): writes an unfinished
 * file on one DN, starts a second DN, transfers the RBW to it, and checks
 * that block id, generation stamp and visible length are preserved.
 */
@Test public void testTransferRbw() throws Exception {
final HdfsConfiguration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
cluster.waitActive();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path p=new Path("/foo");
// Random size > 64KiB so the RBW has non-trivial content.
final int size=(1 << 16) + RAN.nextInt(1 << 16);
LOG.info("size = " + size);
final FSDataOutputStream out=fs.create(p,REPLICATION);
final byte[] bytes=new byte[1024];
// Write without closing: hflush keeps the last block in RBW state.
for (int remaining=size; remaining > 0; ) {
RAN.nextBytes(bytes);
final int len=bytes.length < remaining ? bytes.length : remaining;
out.write(bytes,0,len);
out.hflush();
remaining-=len;
}
final ReplicaBeingWritten oldrbw;
final DataNode newnode;
final DatanodeInfo newnodeinfo;
final String bpid=cluster.getNamesystem().getBlockPoolId();
{
final DataNode oldnode=cluster.getDataNodes().get(0);
oldrbw=getRbw(oldnode,bpid);
LOG.info("oldrbw = " + oldrbw);
// Bring up the destination DN for the transfer.
cluster.startDataNodes(conf,1,true,null,null);
newnode=cluster.getDataNodes().get(REPLICATION);
final DatanodeInfo oldnodeinfo;
{
final DatanodeInfo[] datatnodeinfos=cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertEquals(2,datatnodeinfos.length);
// Find which report entry corresponds to the new DN; the other is the old one.
int i=0;
for (DatanodeRegistration dnReg=newnode.getDNRegistrationForBP(bpid); i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++) ;
Assert.assertTrue(i < datatnodeinfos.length);
newnodeinfo=datatnodeinfos[i];
oldnodeinfo=datatnodeinfos[1 - i];
}
// Transfer only the acked portion of the RBW to the new DN.
final ExtendedBlock b=new ExtendedBlock(bpid,oldrbw.getBlockId(),oldrbw.getBytesAcked(),oldrbw.getGenerationStamp());
final BlockOpResponseProto s=DFSTestUtil.transferRbw(b,DFSClientAdapter.getDFSClient(fs),oldnodeinfo,newnodeinfo);
Assert.assertEquals(Status.SUCCESS,s.getStatus());
}
// The transferred RBW must keep the original identity and visible length.
final ReplicaBeingWritten newrbw=getRbw(newnode,bpid);
LOG.info("newrbw = " + newrbw);
Assert.assertEquals(oldrbw.getBlockId(),newrbw.getBlockId());
Assert.assertEquals(oldrbw.getGenerationStamp(),newrbw.getGenerationStamp());
Assert.assertEquals(oldrbw.getVisibleLength(),newrbw.getVisibleLength());
LOG.info("DONE");
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test for{@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}
*/
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid=cluster.getNamesystem().getBlockPoolId();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L);
final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr);
final DatanodeInfo[] datanodeinfo=locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
final ExtendedBlock b=locatedblock.getBlock();
final long recoveryid=b.getGenerationStamp() + 1;
final long newlength=b.getNumBytes() - 1;
final FsDatasetSpi> fsdataset=DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid));
final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId());
Assert.assertEquals(ReplicaState.RUR,replica.getState());
FsDatasetImpl.checkReplicaFiles(replica);
{
final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp());
try {
fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength);
assertTrue(storageID != null);
}
finally {
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that DatanodeWebHdfsMethods#deserializeToken yields a token bound
 * to the logical (HA) URI when the datanode reports an HA configuration.
 */
@Test public void testDeserializeHAToken() throws IOException {
  final Configuration haConf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  // Mock a datanode that hands back the HA configuration.
  final DataNode mockDn=mock(DataNode.class);
  doReturn(haConf).when(mockDn).getConf();
  // Mock the servlet context through which the handler looks up the datanode.
  final ServletContext servletCtx=mock(ServletContext.class);
  doReturn(mockDn).when(servletCtx).getAttribute("datanode");
  final DatanodeWebHdfsMethods handler=new DatanodeWebHdfsMethods();
  Whitebox.setInternalState(handler,"context",servletCtx);
  // Round-trip an empty token through its URL-string encoding.
  final Token original=new Token();
  final Token roundTripped=handler.deserializeToken(original.encodeToUrlString(),LOGICAL_NAME);
  Assert.assertTrue(HAUtil.isTokenForLogicalUri(roundTripped));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests xattrs in the "raw." namespace: only the superuser may read or write
 * them, and they are addressed via the /.reserved/raw path prefix (rawPath).
 * NOTE(review): relies on class fixtures fs, dfsCluster, path, rawPath,
 * raw1/raw2 and value1/value2/newValue1 declared elsewhere in this class.
 */
@Test(timeout=120000) public void testRawXAttrs() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
// CREATE|REPLACE on a fresh attr behaves like create; read it back.
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
{
final byte[] value=fs.getXAttr(rawPath,raw1);
Assert.assertArrayEquals(value,value1);
}
// getXAttrs returns exactly the one raw attr just set.
{
final Map xattrs=fs.getXAttrs(rawPath);
Assert.assertEquals(xattrs.size(),1);
Assert.assertArrayEquals(value1,xattrs.get(raw1));
fs.removeXAttr(rawPath,raw1);
}
// CREATE then CREATE|REPLACE overwrites the value in place.
{
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
final Map xattrs=fs.getXAttrs(rawPath);
Assert.assertEquals(xattrs.size(),1);
Assert.assertArrayEquals(newValue1,xattrs.get(raw1));
fs.removeXAttr(rawPath,raw1);
}
// Listing through the raw path shows both raw attrs.
{
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE));
final List xattrNames=fs.listXAttrs(rawPath);
assertTrue(xattrNames.contains(raw1));
assertTrue(xattrNames.contains(raw2));
assertTrue(xattrNames.size() == 2);
fs.removeXAttr(rawPath,raw1);
fs.removeXAttr(rawPath,raw2);
}
// Listing through the NON-raw path must hide the raw attrs entirely.
{
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE));
final List xattrNames=fs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
fs.removeXAttr(rawPath,raw1);
fs.removeXAttr(rawPath,raw2);
}
// A non-superuser must be denied every raw-xattr operation,
// via both the raw and the non-raw path.
{
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
try {
userFs.setXAttr(path,raw1,value1);
fail("setXAttr should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.setXAttr(rawPath,raw1,value1);
fail("setXAttr should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.getXAttrs(rawPath);
fail("getXAttrs should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.getXAttrs(path);
fail("getXAttrs should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.getXAttr(rawPath,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.getXAttr(path,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
}
return null;
}
}
);
}
// Even when a raw attr EXISTS, a non-superuser can neither read nor list it;
// listing via the non-raw path simply shows nothing.
{
fs.setXAttr(rawPath,raw1,value1);
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
try {
userFs.getXAttr(rawPath,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
}
try {
userFs.getXAttr(path,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
}
final List xattrNames=userFs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
try {
userFs.listXAttrs(rawPath);
fail("listXAttrs on raw path should have thrown");
}
catch ( AccessControlException e) {
}
return null;
}
}
);
fs.removeXAttr(rawPath,raw1);
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the listXAttrs api.
 * listXAttrs on a path that doesn't exist.
 * listXAttrs on a path with no XAttrs.
 * Check basic functionality.
 * Check that read access to the parent dir is not enough to get xattr names.
 * Check that write access to the parent dir is not enough to get names.
 * Check that execute/scan access to the parent dir is sufficient to get
 * xattr names.
 */
@Test(timeout=120000) public void testListXAttrs() throws Exception {
  final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
  // listXAttrs on a missing path must raise FileNotFoundException.
  try {
    fs.listXAttrs(path);
    fail("expected FileNotFoundException");
  }
  catch ( FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("cannot find",e);
  }
  FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
  // A fresh directory carries no xattrs.
  final List noXAttrs=fs.listXAttrs(path);
  // IDIOM: assertEquals instead of assertTrue(size()==0) for a useful failure message.
  assertEquals("XAttrs were found?",0,noXAttrs.size());
  fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE));
  // Both names must be listed, and nothing else.
  final List xattrNames=fs.listXAttrs(path);
  assertTrue(xattrNames.contains(name1));
  assertTrue(xattrNames.contains(name2));
  assertEquals(2,xattrNames.size());
  // Read access (0704) on the parent is NOT enough to list a child's xattr names.
  fs.setPermission(path,new FsPermission((short)0704));
  final Path childDir=new Path(path,"child" + pathCount);
  FileSystem.mkdirs(fs,childDir,FsPermission.createImmutable((short)0700));
  fs.setXAttr(childDir,name1,"1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction(){
      @Override public Object run() throws Exception {
        final FileSystem userFs=dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    }
    );
    fail("expected IOException");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied",e);
  }
  // Write access (0702) on the parent is also insufficient.
  fs.setPermission(path,new FsPermission((short)0702));
  try {
    user.doAs(new PrivilegedExceptionAction(){
      @Override public Object run() throws Exception {
        final FileSystem userFs=dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    }
    );
    fail("expected IOException");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied",e);
  }
  // Execute/scan access (0701) on the parent IS sufficient to get names.
  fs.setPermission(path,new FsPermission((short)0701));
  user.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws Exception {
      final FileSystem userFs=dfsCluster.getFileSystem();
      userFs.listXAttrs(childDir);
      return null;
    }
  }
  );
  // A "trusted." xattr is visible to the superuser but hidden from "user".
  fs.setXAttr(childDir,"trusted.myxattr","1234".getBytes());
  user.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws Exception {
      final FileSystem userFs=dfsCluster.getFileSystem();
      assertEquals(1,userFs.listXAttrs(childDir).size());
      return null;
    }
  }
  );
  assertEquals(2,fs.listXAttrs(childDir).size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A retried addBlock() after a NameNode restart must return the same block as
 * the first call, complete with its target locations.
 */
@Test public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src="/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc=cluster.getNameNodeRpc();
  // Open the file for create, then allocate its first block.
  nameNodeRpc.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null);
  LOG.info("Starting first addBlock for " + src);
  final LocatedBlock firstBlock=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  assertTrue("Block locations should be present",firstBlock.getLocations().length > 0);
  // Restart the NameNode and replay the identical addBlock call (a "retry").
  cluster.restartNameNode();
  nameNodeRpc=cluster.getNameNodeRpc();
  final LocatedBlock retriedBlock=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  assertEquals("Blocks are not equal",firstBlock.getBlock(),retriedBlock.getBlock());
  assertTrue("Wrong locations with retry",retriedBlock.getLocations().length > 0);
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Start a mini cluster, then verify that NameNode.format() is refused while
 * {@code DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY} is false and succeeds once the
 * key is set back to true.
 * @throws IOException if cluster startup or formatting fails unexpectedly
 */
@Test public void testAllowFormat() throws IOException {
  LOG.info("--starting mini cluster");
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true);
  cluster=new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
  cluster.waitActive();
  assertNotNull(cluster);
  final NameNode nn=cluster.getNameNode();
  assertNotNull(nn);
  LOG.info("Mini cluster created OK");
  // With allowformat=false, a format attempt must be rejected.
  LOG.info("Verifying format will fail with allowformat false");
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,false);
  try {
    cluster.shutdown();
    NameNode.format(config);
    fail("Format succeeded, when it should have failed");
  }
  catch ( IOException e) {
    assertTrue("Exception was not about formatting Namenode",e.getMessage().startsWith("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
    LOG.info("Expected failure: " + StringUtils.stringifyException(e));
    LOG.info("Done verifying format will fail with allowformat false");
  }
  // With allowformat=true the same format call must succeed.
  LOG.info("Verifying format will succeed with allowformat true");
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true);
  NameNode.format(config);
  LOG.info("Done verifying format will succeed with allowformat true");
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Minor test related to HADOOP-9155: during a FileSystem.setPermission()
 * operation, the stat passed to logAuditEvent() must carry the NEW permission
 * rather than the old one.
 */
@Test public void testAuditLoggerWithSetPermission() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    final FileSystem fs=cluster.getFileSystem();
    final Path root=new Path("/");
    final long now=System.currentTimeMillis();
    // Two audited operations: setTimes then setPermission.
    fs.setTimes(root,now,now);
    fs.setPermission(root,new FsPermission(TEST_PERMISSION));
    // The audit logger must have observed the freshly-set permission.
    assertEquals(TEST_PERMISSION,DummyAuditLogger.foundPermission);
    assertEquals(2,DummyAuditLogger.logCount);
  }
  finally {
    cluster.shutdown();
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Sanity-checks that a configured AuditLogger receives events: one audited
 * setTimes() call should yield exactly one log entry.
 */
@Test public void testAuditLogger() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    final FileSystem fs=cluster.getFileSystem();
    final long now=System.currentTimeMillis();
    fs.setTimes(new Path("/"),now,now);
    assertEquals(1,DummyAuditLogger.logCount);
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Audits issued via WebHDFS must record the true remote address: the
 * X-Forwarded-For header is honored only when the request comes from a
 * configured proxy server.
 */
@Test public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
Configuration conf=new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
GetOpParam.Op op=GetOpParam.Op.GETFILESTATUS;
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
URI uri=new URI("http",NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),"/webhdfs/v1/",op.toQueryString(),null);
// Plain request: audit records the loopback client address.
HttpURLConnection conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(1,DummyAuditLogger.logCount);
assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
// X-Forwarded-For from a NON-proxy host must be ignored.
conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For","1.1.1.1");
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(2,DummyAuditLogger.logCount);
assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
// Register 127.0.0.1 as a proxy server; now the header must be honored.
conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS,"127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For","1.1.1.1");
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(3,DummyAuditLogger.logCount);
assertEquals("1.1.1.1",DummyAuditLogger.remoteAddr);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * An allowed getFileStatus() call must produce an "allowed" entry in the
 * audit log.
 */
@Test public void testAuditAllowedStat() throws Exception {
  final Path file=new Path(fnames[0]);
  final FileSystem userFs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
  setupAuditLogs();
  final FileStatus status=userFs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file",status != null && status.isFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A stat issued through WebHDFS must put the proper entry in the audit log.
 */
@Test public void testAuditWebHdfsStat() throws Exception {
  final Path file=new Path(fnames[0]);
  // Make the file world-readable and root-owned so the webhdfs user may stat it.
  fs.setPermission(file,new FsPermission((short)0644));
  fs.setOwner(file,"root",null);
  setupAuditLogs();
  final WebHdfsFileSystem webfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,conf,WebHdfsFileSystem.SCHEME);
  final FileStatus status=webfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file",status != null && status.isFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A read through WebHDFS must put the proper entries in the audit log
 * (open is audited via redirect, hence the repeat count of 3).
 */
@Test public void testAuditWebHdfs() throws Exception {
  final Path file=new Path(fnames[0]);
  // Make the file world-readable and root-owned so the webhdfs user may read it.
  fs.setPermission(file,new FsPermission((short)0644));
  fs.setOwner(file,"root",null);
  setupAuditLogs();
  WebHdfsFileSystem webfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,conf,WebHdfsFileSystem.SCHEME);
  InputStream istream=webfs.open(file);
  int val;
  try {
    val=istream.read();
  }
  finally {
    // BUGFIX: close in finally so the stream is not leaked if read() throws.
    istream.close();
  }
  verifyAuditLogsRepeat(true,3);
  assertTrue("failed to read from file",val >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * An allowed read operation must put the proper entry in the audit log.
 */
@Test public void testAuditAllowed() throws Exception {
  final Path file=new Path(fnames[0]);
  FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
  setupAuditLogs();
  InputStream istream=userfs.open(file);
  int val;
  try {
    val=istream.read();
  }
  finally {
    // BUGFIX: close in finally so the stream is not leaked if read() throws.
    istream.close();
  }
  verifyAuditLogs(true);
  assertTrue("failed to read from file",val >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Ensure that the backupnode will tail edits from the NN and stay in sync,
 * even while the NN rolls its edit log, checkpoints occur, and the BN is
 * stopped and restarted while the namespace keeps changing.
 */
@Test public void testBackupNodeTailsEdits() throws Exception {
Configuration conf=new HdfsConfiguration();
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
BackupNode backup=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
fileSys=cluster.getFileSystem();
backup=startBackupNode(conf,StartupOption.BACKUP,1);
BackupImage bnImage=(BackupImage)backup.getFSImage();
testBNInSync(cluster,backup,1);
// Roll the NN's edit log; the BN must follow into the new segment.
NameNode nn=cluster.getNameNode();
NamenodeProtocols nnRpc=nn.getRpcServer();
nnRpc.rollEditLog();
assertEquals(bnImage.getEditLog().getCurSegmentTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
testBNInSync(cluster,backup,2);
// A BN-driven checkpoint must advance the NN's most recent checkpoint txid.
long nnImageBefore=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
backup.doCheckpoint();
long nnImageAfter=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
assertTrue("nn should have received new checkpoint. before: " + nnImageBefore + " after: "+ nnImageAfter,nnImageAfter > nnImageBefore);
testBNInSync(cluster,backup,3);
// Stop the BN; its latest edits segment should remain in-progress.
StorageDirectory sd=bnImage.getStorage().getStorageDir(0);
backup.stop();
backup=null;
EditLogFile editsLog=FSImageTestUtil.findLatestEditsLog(sd);
assertEquals(editsLog.getFirstTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
assertTrue("Should not have finalized " + editsLog,editsLog.isInProgress());
// Mutate the namespace while the BN is down, then restart it and verify
// it catches up on the missed edit.
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
backup=startBackupNode(conf,StartupOption.BACKUP,1);
testBNInSync(cluster,backup,4);
assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down",false));
backup.stop(false);
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
}
finally {
LOG.info("Shutting down...");
if (backup != null) backup.stop();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
// Compare on-disk storage after shutdown; both references remain valid objects.
assertStorageDirsMatch(cluster.getNameNode(),backup);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a file can be read both from the NameNode and from the
 * BackupNode, and that both reads return identical data.
 */
@Test public void testCanReadData() throws IOException {
  Path file1=new Path("/fileToRead.dat");
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  BackupNode backup=null;
  try {
    // Start the NameNode and BackupNode and wait for the initial checkpoint.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fileSys=cluster.getFileSystem();
    long txid=cluster.getNameNodeRpc().getTransactionID();
    backup=startBackupNode(conf,StartupOption.BACKUP,1);
    waitCheckpointDone(cluster,txid);
    // Wire both nodes into a pseudo-HA nameservice "bnCluster" so clients
    // can address either node by its RPC address.
    // (Dropped a no-op conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) whose result
    // was discarded in the original.)
    String rpcAddrKeyPrefix=DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
    String nnAddr=cluster.getNameNode().getNameNodeAddressHostPortString();
    String bnAddr=backup.getNameNodeAddressHostPortString();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,"bnCluster");
    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID,"bnCluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster","nnActive, nnBackup");
    conf.set(rpcAddrKeyPrefix + ".nnActive",nnAddr);
    conf.set(rpcAddrKeyPrefix + ".nnBackup",bnAddr);
    cluster.startDataNodes(conf,3,true,StartupOption.REGULAR,null);
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)3,seed);
    // Read the file through the BackupNode and compare with the NameNode's view.
    FileSystem bnFS=FileSystem.get(new Path("hdfs://" + bnAddr).toUri(),conf);
    String nnData=DFSTestUtil.readFile(fileSys,file1);
    String bnData=DFSTestUtil.readFile(bnFS,file1);
    assertEquals("Data read from BackupNode and NameNode is not the same.",nnData,bnData);
  }
  catch ( IOException e) {
    LOG.error("Error in TestBackupNode: ",e);
    // BUGFIX: was assertTrue(msg,false) — use fail() to report the error directly.
    fail(e.getLocalizedMessage());
  }
  finally {
    if (fileSys != null) fileSys.close();
    if (backup != null) backup.stop();
    if (cluster != null) cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files: as the file
 * grows block by block, each listing must report the current block count and
 * the last block must still be under construction.
 */
@Test public void testGetBlockLocations() throws IOException {
final NamenodeProtocols namenode=cluster.getNameNodeRpc();
final Path p=new Path(BASE_DIR,"file2.dat");
final String src=p.toString();
final FSDataOutputStream out=TestFileCreation.createFile(hdfs,p,3);
// Start with half a block written; the file remains open throughout.
int len=BLOCK_SIZE >>> 1;
writeFile(p,out,len);
// The loop index i is advanced inside the body (++i below), not in the header.
for (int i=1; i < NUM_BLOCKS; ) {
final LocatedBlocks lb=namenode.getBlockLocations(src,0,len);
final List blocks=lb.getLocatedBlocks();
assertEquals(i,blocks.size());
// The last block of an open file must be reported as under construction.
final Block b=blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
assertTrue(b instanceof BlockInfoUnderConstruction);
// Append a full block for the next iteration, unless this was the last one.
if (++i < NUM_BLOCKS) {
writeFile(p,out,BLOCK_SIZE);
len+=BLOCK_SIZE;
}
}
out.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises cache pool CRUD: add a pool and verify its attributes via
 * listing, modify every attribute and re-verify, then remove the pool and
 * confirm that removal of a missing pool fails.
 */
@Test(timeout=60000) public void testCreateAndModifyPools() throws Exception {
String poolName="pool1";
String ownerName="abc";
String groupName="123";
FsPermission mode=new FsPermission((short)0755);
long limit=150;
dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
// Listing must reflect the attributes the pool was created with.
RemoteIterator iter=dfs.listCachePools();
CachePoolInfo info=iter.next().getInfo();
assertEquals(poolName,info.getPoolName());
assertEquals(ownerName,info.getOwnerName());
assertEquals(groupName,info.getGroupName());
// Modify every attribute and verify the listing picks up the changes.
ownerName="def";
groupName="456";
mode=new FsPermission((short)0700);
limit=151;
dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
iter=dfs.listCachePools();
info=iter.next().getInfo();
assertEquals(poolName,info.getPoolName());
assertEquals(ownerName,info.getOwnerName());
assertEquals(groupName,info.getGroupName());
assertEquals(mode,info.getMode());
assertEquals(limit,(long)info.getLimit());
// Remove the pool; the listing must now be empty.
dfs.removeCachePool(poolName);
iter=dfs.listCachePools();
assertFalse("expected no cache pools after deleting pool",iter.hasNext());
proto.listCachePools(null);
// Removing a pool that never existed must fail.
try {
proto.removeCachePool("pool99");
fail("expected to get an exception when " + "removing a non-existent pool.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
}
// Removing the already-removed pool must fail the same way.
try {
proto.removeCachePool(poolName);
fail("expected to get an exception when " + "removing a non-existent pool.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
}
iter=dfs.listCachePools();
assertFalse("expected no cache pools after deleting pool",iter.hasNext());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises a cache pool's max relative expiry: invalid values are rejected
 * at pool creation/modification, directives default to the pool maximum, and
 * directives may not exceed the maximum of their (destination) pool unless
 * the pool allows it.
 */
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception {
// Pool creation: negative or enormous max expiries must be rejected.
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative",e);
}
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("too big",e);
}
// Create a valid pool with a 10-minute max relative expiry.
CachePoolInfo coolPool=new CachePoolInfo("coolPool");
final long poolExpiration=1000 * 60 * 10l;
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
RemoteIterator poolIt=dfs.listCachePools();
CachePoolInfo listPool=poolIt.next().getInfo();
assertFalse("Should only be one pool",poolIt.hasNext());
assertEquals("Expected max relative expiry to match set value",poolExpiration,listPool.getMaxRelativeExpiryMs().longValue());
// Pool modification must enforce the same bounds as creation.
try {
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("negative",e);
}
try {
dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("too big",e);
}
// A directive with no explicit expiry inherits roughly the pool's max.
CacheDirectiveInfo defaultExpiry=new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
dfs.addCacheDirective(defaultExpiry);
RemoteIterator dirIt=dfs.listCacheDirectives(defaultExpiry);
CacheDirectiveInfo listInfo=dirIt.next().getInfo();
assertFalse("Should only have one entry in listing",dirIt.hasNext());
long listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Directive expiry should be approximately the pool's max expiry",Math.abs(listExpiration - poolExpiration) < 10 * 1000);
// Adding/modifying directives beyond the pool's max expiry must fail,
// whether expressed relatively or absolutely.
CacheDirectiveInfo.Builder builder=new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
// Expiries too far in the future are rejected regardless of pool settings.
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( IllegalArgumentException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
// Moving a directive to a pool with a LOWER max expiry must fail unless the
// directive's expiry is reduced to fit.
CachePoolInfo destPool=new CachePoolInfo("destPool");
dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
fail("Modified a directive to a pool with a lower max expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
dirIt=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
listInfo=dirIt.next().getInfo();
listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately "+ poolExpiration / 2,Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
// With the pool max raised to NEVER, directives up to NEVER are allowed.
dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
poolIt=dfs.listCachePools();
listPool=poolIt.next().getInfo();
while (!listPool.getPoolName().equals(destPool.getPoolName())) {
listPool=poolIt.next().getInfo();
}
assertEquals("Expected max relative expiry to match set value",CachePoolInfo.RELATIVE_EXPIRY_NEVER,listPool.getMaxRelativeExpiryMs().longValue());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests cache directive expiration: a relative expiry of 0 expires the
 * directive immediately (blocks uncached), extending the expiry re-caches
 * the blocks, and a negative expiry is rejected.
 */
@Test(timeout=120000) public void testExpiry() throws Exception {
  String pool="pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p=new Path("/mypath");
  DFSTestUtil.createFile(dfs,p,BLOCK_SIZE * 2,(short)2,0x999);
  // Expiry two minutes out: the file's blocks should be cached.
  Date start=new Date();
  Date expiry=DateUtils.addSeconds(start,120);
  final long id=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(p).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry)).setReplication((short)2).build());
  waitForCachedBlocks(cluster.getNameNode(),2,4,"testExpiry:1");
  // Set the expiry to "now": the blocks must be uncached.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(),0,0,"testExpiry:2");
  RemoteIterator it=dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent=it.next();
  assertFalse(it.hasNext());
  Date entryExpiry=new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",entryExpiry.before(new Date()));
  // Extend the expiry again: the blocks must be re-cached.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(),2,4,"testExpiry:3");
  it=dfs.listCacheDirectives(null);
  ent=it.next();
  assertFalse(it.hasNext());
  entryExpiry=new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",entryExpiry.after(new Date()));
  // A negative relative expiration must be rejected.
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(-1)).build());
    // BUGFIX: the original try block had no fail(), so the test passed
    // silently if no exception was thrown.
    fail("Should not be able to set a negative expiration");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative expiration",e);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests cache pool limits: negative limits are rejected, directives exceeding
 * a pool's remaining capacity are rejected unless CacheFlag.FORCE is given,
 * and shrinking a pool's limit uncaches overlimit data.
 */
@Test(timeout=120000) public void testLimit() throws Exception {
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
    fail("Should not be able to set a negative limit");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative",e);
  }
  final String destiny="poolofdestiny";
  final Path path1=new Path("/destiny");
  DFSTestUtil.createFile(dfs,path1,2 * BLOCK_SIZE,(short)1,0x9494);
  // Pool limit is one byte short of the file size, so caching must fail.
  final CachePoolInfo poolInfo=new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1=new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity",e);
  }
  // Raise the limit to exactly the file size; caching now succeeds.
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1=dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs,2 * BLOCK_SIZE,2 * BLOCK_SIZE,1,1,poolInfo,"testLimit:1");
  // A second file does not fit within the remaining capacity.
  final Path path2=new Path("/failure");
  DFSTestUtil.createFile(dfs,path2,BLOCK_SIZE,(short)1,0x9495);
  try {
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path2).build(),EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity",e);
  }
  // Shrink the limit below what is cached; the data must be uncached and
  // the pool must report its overlimit byte count.
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs,2 * BLOCK_SIZE,0,1,0,poolInfo,"testLimit:2");
  RemoteIterator it=dfs.listCachePools();
  assertTrue("Expected a cache pool",it.hasNext());
  CachePoolStats stats=it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",BLOCK_SIZE,stats.getBytesOverlimit());
  // Moving the directive into a too-small pool must fail without FORCE.
  CachePoolInfo inadequate=new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),EnumSet.noneOf(CacheFlag.class));
    // BUGFIX: the original try block had no fail(), so the test passed
    // silently if no exception was thrown.
    fail("Should not be able to move directive to a pool with insufficient capacity");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity",e);
  }
  // With CacheFlag.FORCE the capacity check is bypassed.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),EnumSet.of(CacheFlag.FORCE));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName()).setPath(path1).build(),EnumSet.of(CacheFlag.FORCE));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that cache pools and directives survive checkpointing and a
 * NameNode restart: state is persisted into the fsimage/edits, reloaded on
 * restart, and the directive ID counter continues where it left off.
 *
 * Fix: removed a duplicated assertEquals(pool, info.getPoolName()) that
 * appeared twice in a row in the post-restart verification.
 */
@Test(timeout=60000)
public void testCacheManagerRestart() throws Exception {
  SecondaryNameNode secondary = null;
  try {
    // Start a secondary namenode so checkpoints can be forced below.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    secondary = new SecondaryNameNode(conf);
    // Create a pool with non-default group, mode and limit.
    final String pool = "poolparty";
    String groupName = "partygroup";
    FsPermission mode = new FsPermission((short)0777);
    long limit = 747;
    dfs.addCachePool(new CachePoolInfo(pool)
        .setGroupName(groupName)
        .setMode(mode)
        .setLimit(limit));
    RemoteIterator pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    CachePoolInfo info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long)info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    // Add a batch of directives sharing one fixed absolute expiry time.
    int numEntries = 10;
    String entryPrefix = "/party-";
    long prevId = -1;
    final Date expiry = new Date();
    for (int i = 0; i < numEntries; i++) {
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPath(new Path(entryPrefix + i))
          .setPool(pool)
          .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))
          .build());
    }
    RemoteIterator dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // Checkpoint once, then force a new fsimage via saveNamespace so the
    // secondary has to fetch a fresh image on the next checkpoint.
    secondary.doCheckpoint();
    final String imagePool = "imagePool";
    dfs.addCachePool(new CachePoolInfo(imagePool));
    prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    boolean fetchImage = secondary.doCheckpoint();
    assertTrue("Secondary should have fetched a new fsimage from NameNode",
        fetchImage);
    dfs.removeCachePool(imagePool);
    // Restart the namenode and verify everything was persisted.
    cluster.restartNameNode();
    pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long)info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // The directive ID counter must not have been reset by the restart.
    long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foobar")).setPool(pool).build());
    assertEquals(prevId + 1, nextId);
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the add/list/modify/remove lifecycle of cache directives,
 * including duplicate adds, unknown pools, permission failures, malformed
 * paths, filtered listings, and error handling for bad directive IDs.
 *
 * Fix: long literals now use an uppercase 'L' suffix (-42L, 43L); a
 * lowercase 'l' is easily misread as the digit '1'.
 */
@Test(timeout=60000)
public void testAddRemoveDirectives() throws Exception {
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  // pool4 has mode 0: no permissions for any non-superuser.
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding the same CacheDirectiveInfo must yield a distinct directive.
  long alphaId = addAsUnprivileged(alpha);
  long alphaId2 = addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an "
      + "existing CacheDirectiveInfo", alphaId == alphaId2);
  long betaId = addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  } catch (InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with "
        + "mode 0 (no permissions for anyone).");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path "
        + "to the cache directives.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/emptypoolname"))
        .setReplication((short)1).setPool("").build());
    fail("expected an error when adding a cache "
        + "directive with an empty pool name.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
  }
  long deltaId = addAsUnprivileged(delta);
  // Relative paths are valid directive paths too.
  long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder()
      .setPath(new Path("relative")).setPool("pool1").build());
  RemoteIterator iter;
  // Unfiltered listing returns everything added so far.
  iter = dfs.listCacheDirectives(null);
  validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
  // Filter by pool and by ID.
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter, betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter, alphaId2);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter, relativeId);
  dfs.removeCacheDirective(betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  try {
    proto.removeCacheDirective(-42L);
    fail("expected an error when removing a negative ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the sole remaining directive and verify the change took effect.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(relativeId).setReplication((short)555).build());
  iter = dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified = iter.next().getInfo();
  assertEquals(relativeId, modified.getId().longValue());
  assertEquals((short)555, modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter = dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // "." (the working directory) is also a valid directive path.
  CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
      .setPath(new Path(".")).setPool("pool1").build();
  long id = dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive)
      .setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that listCachePools filters pool metadata by permission: a
 * remote user with no access to a 0700 pool sees only the pool name; once
 * made the pool's owner, the same user sees owner/group/mode/limit too.
 */
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception {
final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser");
final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf);
final String poolName="poolparty";
// Pool created by the superuser with mode 0700 (owner-only access).
dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700)));
RemoteIterator it=myDfs.listCachePools();
CachePoolInfo info=it.next().getInfo();
assertFalse(it.hasNext());
// "myuser" has no access: only the pool name is exposed, all other
// metadata fields come back null.
assertEquals("Expected pool name",poolName,info.getPoolName());
assertNull("Unexpected owner name",info.getOwnerName());
assertNull("Unexpected group name",info.getGroupName());
assertNull("Unexpected mode",info.getMode());
assertNull("Unexpected limit",info.getLimit());
final long limit=99;
// Make "myuser" the owner; the full metadata should now be visible.
dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
it=myDfs.listCachePools();
info=it.next().getInfo();
assertFalse(it.hasNext());
assertEquals("Expected pool name",poolName,info.getPoolName());
assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName());
assertNotNull("Expected group name",info.getGroupName());
assertEquals("Mismatched mode",(short)0700,info.getMode().toShort());
assertEquals("Mismatched limit",limit,(long)info.getLimit());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests save namespace.
 *
 * Verifies that delegation tokens survive a -saveNamespace / restart cycle:
 * tokens issued before the save (and tokens issued only into the edit log
 * afterwards) can still be renewed and cancelled across several restarts.
 *
 * Fixes: removed a stray empty statement (';') inside the first loop, and
 * preserved the exception cause when wrapping into IOException (the old
 * code threw new IOException(e.getMessage()), losing the stack trace).
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token token1 = namesystem.getDelegationToken(new Text(renewer));
    Token token2 = namesystem.getDelegationToken(new Text(renewer));
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[]{"-saveNamespace"};
    NameNode nn = cluster.getNameNode();
    // Before the save, the in-progress edit segment holds the transactions
    // generated so far (asserted to be 5 per storage directory).
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should have 5 transactions",
          5, numTransactions);
    }
    // saveNamespace requires the namenode to be in safe mode.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      // Preserve the original exception as the cause.
      throw new IOException(e);
    }
    // After the save, the edit log was rolled; the new in-progress segment
    // should contain only the START transaction.
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn",
          1, numTransactions);
    }
    // Restart (format=false) and check pre-save tokens were persisted.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
    namesystem = cluster.getNamesystem();
    Token token3 = namesystem.getDelegationToken(new Text(renewer));
    Token token4 = namesystem.getDelegationToken(new Text(renewer));
    // Restart again; tokens 3 and 4 exist only in the edit log at this point.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    Token token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
    // One final restart; then renew AND cancel every token issued so far.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises SecondaryNameNode.CommandLineOpts parsing: no arguments,
 * -checkpoint (with and without "force"), -geteditsize, -format, and two
 * invalid argument combinations that must throw ParseException.
 */
@Test public void testCommandLineParsing() throws ParseException {
SecondaryNameNode.CommandLineOpts opts=new SecondaryNameNode.CommandLineOpts();
// No arguments: no command is selected.
opts.parse();
assertNull(opts.getCommand());
opts.parse("-checkpoint");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,opts.getCommand());
assertFalse(opts.shouldForceCheckpoint());
// "force" is an optional extra argument to -checkpoint.
opts.parse("-checkpoint","force");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,opts.getCommand());
assertTrue(opts.shouldForceCheckpoint());
opts.parse("-geteditsize");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE,opts.getCommand());
opts.parse("-format");
assertTrue(opts.shouldFormat());
// Two commands at once must be rejected.
try {
opts.parse("-geteditsize","-checkpoint");
fail("Should have failed bad parsing for two actions");
}
catch ( ParseException e) {
// Expected.
LOG.warn("Encountered ",e);
}
// An unknown argument to -checkpoint must be rejected.
try {
opts.parse("-checkpoint","xx");
fail("Should have failed for bad checkpoint arg");
}
catch ( ParseException e) {
// Expected.
LOG.warn("Encountered ",e);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Checkpoints once, injects a failure during the merge phase of the next
 * checkpoint, then verifies that a subsequent checkpoint reloads the image
 * (doCheckpoint() returns true) and succeeds.
 */
@Test(timeout=30000) public void testReloadOnEditReplayFailure() throws IOException {
Configuration conf=new HdfsConfiguration();
FSDataOutputStream fos=null;
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
fos=fs.create(new Path("tmpfile0"));
fos.write(new byte[]{0,1,2,3});
secondary.doCheckpoint();
// Generate more edits (hsync flushes them to the NN) before the
// checkpoint that is set up to fail.
fos.write(new byte[]{0,1,2,3});
fos.hsync();
Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
// Expected: the injected merge failure propagates out of doCheckpoint().
}
Mockito.reset(faultInjector);
fos.write(new byte[]{0,1,2,3});
fos.hsync();
// A true return value indicates the secondary reloaded the image.
assertTrue("Another checkpoint should have reloaded image",secondary.doCheckpoint());
}
finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
// Always clear the injector so later tests are unaffected.
Mockito.reset(faultInjector);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests checkpoint in HDFS.
 *
 * Creates files, checkpoints via a secondary namenode, and restarts the
 * cluster from disk (format=false) to verify namespace changes were
 * persisted by each checkpoint.
 *
 * Fixes: assertTrue(!x) rewritten as assertFalse(x); fileSys.close() in the
 * first finally is null-guarded so a startup failure is not masked by an NPE.
 */
@Test
public void testCheckpoint() throws IOException {
  Path file1 = new Path("checkpoint.dat");
  Path file2 = new Path("checkpoint2.dat");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    assertFalse(fileSys.exists(file2));
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fileSys, file1, replication);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The checkpoint should have bumped the image/edits transfer metrics.
    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
    assertCounterGt("GetImageNumOps", 0, rb);
    assertCounterGt("GetEditNumOps", 0, rb);
    assertCounterGt("PutImageNumOps", 0, rb);
    assertGaugeGt("GetImageAvgTime", 0.0, rb);
    assertGaugeGt("GetEditAvgTime", 0.0, rb);
    assertGaugeGt("PutImageAvgTime", 0.0, rb);
  } finally {
    // Null-guard: if cluster startup threw, fileSys was never assigned.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  Path tmpDir = new Path("/tmp_tmp");
  try {
    // Restart from the checkpointed state; file1 must still exist.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fileSys, file2, replication);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The secondary's root dir must be the exact object in its inode map.
    FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir;
    INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
    assertSame(rootInMap, secondaryFsDir.rootDir);
    fileSys.delete(tmpDir, true);
    fileSys.mkdirs(tmpDir);
    secondary.doCheckpoint();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  // Final restart: file1 was deleted; tmpDir and file2 must survive.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  assertFalse(fileSys.exists(file1));
  assertTrue(fileSys.exists(tmpDir));
  try {
    checkFile(fileSys, file2, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
    cluster = null;
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test the importCheckpoint startup option. Verifies:
 * 1. if the NN already contains an image, it will not be allowed
 * to import a checkpoint.
 * 2. if the NN does not contain an image, importing a checkpoint
 * succeeds and re-saves the image
 *
 * Fix: nameDirs is now Collection&lt;URI&gt; (was a raw Collection, which
 * cannot be iterated with a typed for-each over URI).
 */
@Test
public void testImportCheckpoint() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path testPath = new Path("/testfile");
  SecondaryNameNode snn = null;
  MiniDFSCluster cluster = null;
  Collection<URI> nameDirs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    nameDirs = cluster.getNameDirs(0);
    // Create some namespace state and checkpoint it to the secondary.
    cluster.getFileSystem().mkdirs(testPath);
    snn = startSecondaryNameNode(conf);
    snn.doCheckpoint();
  } finally {
    cleanup(snn);
    cleanup(cluster);
    cluster = null;
  }
  LOG.info("Trying to import checkpoint when the NameNode already "
      + "contains an image. This should fail.");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
        .startupOption(StartupOption.IMPORT).build();
    fail("NameNode did not fail to start when it already contained "
        + "an image");
  } catch (IOException ioe) {
    // Expected.
    GenericTestUtils.assertExceptionContains("NameNode already contains an image", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
  }
  // Wipe the NN storage so the import is allowed to proceed.
  LOG.info("Removing NN storage contents");
  for (URI uri : nameDirs) {
    File dir = new File(uri.getPath());
    LOG.info("Cleaning " + dir);
    removeAndRecreateDir(dir);
  }
  LOG.info("Trying to import checkpoint");
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
        .startupOption(StartupOption.IMPORT).build();
    assertTrue("Path from checkpoint should exist after import",
        cluster.getFileSystem().exists(testPath));
    // A successful import immediately re-saves the image.
    FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Injects a failure right after the secondary rolls the edit log, verifies
 * the namenode still accepts writes afterwards, and that a later restart
 * plus checkpoint succeed.
 *
 * Fixes: assertTrue(false) replaced with fail(...) and assertTrue(!x) with
 * assertFalse(x); fileSys.close() null-guarded in the first finally.
 */
@Test
public void testSecondaryNamenodeError1() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError1");
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpointxx.dat");
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary = startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the edit log roll.
    Mockito.doThrow(new IOException("Injecting failure after rolling edit logs"))
        .when(faultInjector).afterSecondaryCallsRollEditLog();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed due to the injected fault");
    } catch (IOException e) {
      // Expected: the injected failure propagates out of doCheckpoint().
    }
    Mockito.reset(faultInjector);
    // The namenode must still accept writes after the failed checkpoint.
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fileSys, file1, replication);
  } finally {
    // Null-guard: if cluster startup threw, fileSys was never assigned.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  // Restart from disk and verify a subsequent checkpoint succeeds.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the secondary doesn't have to re-download image
 * if it hasn't changed.
 *
 * Fix: assertTrue(!x) rewritten as assertFalse(x).
 */
@Test
public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  Path dir = new Path("/checkpoint");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  FSImage image = cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary = null;
  try {
    assertFalse(fileSys.exists(dir));
    secondary = startSecondaryNameNode(conf);
    File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
    File secondaryCurrent = new File(secondaryDir, "current");
    long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
        .getStorage().getMostRecentCheckpointTxId();
    File secondaryFsImageBefore = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload));
    File secondaryFsImageAfter = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    assertFalse("Secondary should start with empty current/ dir "
        + "but " + secondaryFsImageBefore + " exists",
        secondaryFsImageBefore.exists());
    // First checkpoint: the image must be downloaded and a new one written.
    assertTrue("Secondary should have loaded an image", secondary.doCheckpoint());
    assertTrue("Secondary should have downloaded original image",
        secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",
        secondaryFsImageAfter.exists());
    long fsimageLength = secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",
        fsimageLength, secondaryFsImageAfter.length());
    // Change the namespace; the next checkpoint must NOT re-download the
    // unchanged base image (doCheckpoint() returns false).
    fileSys.mkdirs(dir);
    assertFalse("Another checkpoint should not have to re-load image",
        secondary.doCheckpoint());
    for (StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
          expectedTxIdToDownload + 5);
      assertTrue("Image size increased", imageFile.length() > fsimageLength);
    }
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Injects a failure right after the secondary uploads its new image,
 * verifies the namenode still accepts writes afterwards, and that a later
 * restart plus checkpoint succeed.
 *
 * Fixes: assertTrue(false) replaced with fail(...) and assertTrue(!x) with
 * assertFalse(x); fileSys.close() null-guarded in the first finally.
 */
@Test
public void testSecondaryNamenodeError2() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError2");
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpointyy.dat");
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary = startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the image upload.
    Mockito.doThrow(new IOException("Injecting failure after uploading new image"))
        .when(faultInjector).afterSecondaryUploadsNewImage();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed due to the injected fault");
    } catch (IOException e) {
      // Expected: the injected failure propagates out of doCheckpoint().
    }
    Mockito.reset(faultInjector);
    // The namenode must still accept writes after the failed checkpoint.
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fileSys, file1, replication);
  } finally {
    // Null-guard: if cluster startup threw, fileSys was never assigned.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  // Restart from disk and verify a subsequent checkpoint succeeds.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Injects a failure after the secondary rolls the edit log, then replaces
 * the failed secondary with a fresh instance and verifies it can
 * checkpoint; finally restarts the cluster and checkpoints again.
 *
 * Fixes: assertTrue(false) replaced with fail(...) and assertTrue(!x) with
 * assertFalse(x); fileSys.close() null-guarded in the first finally.
 */
@Test
public void testSecondaryNamenodeError3() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError3");
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpointzz.dat");
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary = startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the edit log roll.
    Mockito.doThrow(new IOException("Injecting failure after rolling edit logs"))
        .when(faultInjector).afterSecondaryCallsRollEditLog();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed due to the injected fault");
    } catch (IOException e) {
      // Expected: the injected failure propagates out of doCheckpoint().
    }
    Mockito.reset(faultInjector);
    // Replace the failed secondary with a fresh one; it must be able to
    // checkpoint despite the previous instance's failure.
    secondary.shutdown();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fileSys, file1, replication);
  } finally {
    // Null-guard: if cluster startup threw, fileSys was never assigned.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  // Restart from disk and verify a subsequent checkpoint succeeds.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * @throws IOException
 */
@Test public void testMultipleSecondaryNamenodes() throws IOException {
Configuration conf=new HdfsConfiguration();
String nameserviceId1="ns1";
String nameserviceId2="ns2";
conf.set(DFSConfigKeys.DFS_NAMESERVICES,nameserviceId1 + "," + nameserviceId2);
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null;
SecondaryNameNode secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).build();
Configuration snConf1=new HdfsConfiguration(cluster.getConfiguration(0));
Configuration snConf2=new HdfsConfiguration(cluster.getConfiguration(1));
InetSocketAddress nn1RpcAddress=cluster.getNameNode(0).getNameNodeAddress();
InetSocketAddress nn2RpcAddress=cluster.getNameNode(1).getNameNodeAddress();
String nn1=nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
String nn2=nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
// Blank the generic service RPC key so each secondary can only resolve
// its namenode through the nameservice-suffixed key set just below.
snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId1),nn1);
snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId2),nn2);
secondary1=startSecondaryNameNode(snConf1);
secondary2=startSecondaryNameNode(snConf2);
// Each secondary must have resolved its own namenode's RPC port, and
// the two namenodes must be distinct endpoints.
assertEquals(secondary1.getNameNodeAddress().getPort(),nn1RpcAddress.getPort());
assertEquals(secondary2.getNameNodeAddress().getPort(),nn2RpcAddress.getPort());
assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());
// Both secondaries must be able to checkpoint their namenode.
secondary1.doCheckpoint();
secondary2.doCheckpoint();
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
cleanup(cluster);
cluster=null;
}
}
InternalCallVerifier BooleanVerifier
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory)
 * See https://issues.apache.org/jira/browse/HDFS-2011
 *
 * Fix: the collections are now parameterized (the originals were raw
 * types; listRsd.get(...).getRoot() requires the StorageDirectory element
 * type to compile without casts).
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  File filePath = new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
      filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(), fsImageDirs, editsDirs);
  try {
    assertTrue("List of storage directories didn't have storageDirToCheck.",
        nnStorage.getEditsDirectories().iterator().next().toString()
            .indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
        nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete the backing directory so the write below hits an IOException.
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
        filePath.delete());
  }
  // The write must not throw; the failing directory is instead moved onto
  // the removed-storage list.
  nnStorage.writeTransactionIdFileToStorage(1);
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
      listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot()
          .toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
BooleanVerifier
/**
 * Verifies that a CheckpointSignature whose blockpool/cluster IDs have been
 * tampered with is rejected by validateStorageInfo.
 *
 * Fix: assertTrue("...", false) replaced with the idiomatic fail("...");
 * both throw AssertionError, which the catch(Exception) clause does not
 * swallow, so behavior is unchanged.
 */
@Test
public void testCheckpointSignature() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // Corrupt the signature's identity fields.
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage());
      fail("This test is expected to fail.");
    } catch (Exception ignored) {
      // Expected: validation must reject the mismatched IDs.
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test public void testSecondaryNameNodeLocking() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
StorageDirectory savedSd=null;
secondary=startSecondaryNameNode(conf);
NNStorage storage=secondary.getFSImage().getStorage();
// While the first 2NN is running, every storage directory must already
// be locked; remember the last one for the re-lock attempt below.
for ( StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd=sd;
}
LOG.info("===> Shutting down first 2NN");
secondary.shutdown();
secondary=null;
LOG.info("===> Locking a dir, starting second 2NN");
LOG.info("Trying to lock" + savedSd);
// Hold the lock ourselves; a second 2NN should now fail to start.
savedSd.lock();
try {
secondary=startSecondaryNameNode(conf);
// If startup did not throw, the directory must not support locking;
// otherwise the lock we hold should have blocked it.
assertFalse("Should fail to start 2NN when " + savedSd + " is locked",savedSd.isLockSupported());
}
catch ( IOException ioe) {
// Expected on filesystems that support locking.
GenericTestUtils.assertExceptionContains("already locked",ioe);
}
finally {
savedSd.unlock();
}
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Tests the following sequence of events:
 * - secondary successfully makes a checkpoint
 * - it then fails while trying to upload it
 * - it then fails again for the same reason
 * - it then tries to checkpoint a third time
 */
@Test public void testCheckpointAfterTwoFailedUploads() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=new HdfsConfiguration();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
secondary=startSecondaryNameNode(conf);
// Every checkpoint attempt will now fail right after the edit log roll.
Mockito.doThrow(new IOException("Injecting failure after rolling edit logs")).when(faultInjector).afterSecondaryCallsRollEditLog();
try {
secondary.doCheckpoint();
fail("Should have failed upload");
}
catch ( IOException ioe) {
// First expected failure.
LOG.info("Got expected failure",ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
}
// A second attempt fails the same way; the secondary must tolerate
// back-to-back failures without corrupting its state.
try {
secondary.doCheckpoint();
fail("Should have failed upload");
}
catch ( IOException ioe) {
LOG.info("Got expected failure",ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
}
finally {
// Clear the injector regardless of the outcome.
Mockito.reset(faultInjector);
}
// With the injector cleared, the third checkpoint must succeed.
secondary.doCheckpoint();
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();
  // Short IPC idle time so the 2NN's cached connection to the old NN is
  // dropped quickly once the NN is restarted.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 1);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);
    // A checkpoint against the original namespace succeeds.
    secondary.doCheckpoint();
    // Reformat the NN on the same ports, producing a different namespace.
    cluster.shutdown();
    cluster = null;
    try {
      // Give the 2NN's idle IPC connection time to be torn down.
      Thread.sleep(100);
    } catch (InterruptedException ie) {
      // Restore the interrupt status instead of swallowing it, so the
      // surrounding runner can still observe the interruption.
      Thread.currentThread().interrupt();
    }
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true).build();
    // The 2NN still holds checkpoint state from the old namespace, so a
    // checkpoint against the reformatted NN must be rejected.
    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that a fault while downloading edits does not prevent future
 * checkpointing
 */
@Test(timeout=30000) public void testEditFailureBeforeRename() throws IOException {
Configuration conf=new HdfsConfiguration();
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
// Baseline checkpoint succeeds before any fault is injected.
secondary.doCheckpoint();
// Fail the next checkpoint just before the downloaded edits are renamed
// into place, which leaves a *.tmp edits file behind.
Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
}
Mockito.reset(faultInjector);
// Each edits dir must contain exactly one leftover tmp edits file;
// truncate it to zero length to simulate a partially written download.
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
RandomAccessFile randFile=new RandomAccessFile(tmpEdits[0],"rw");
randFile.setLength(0);
randFile.close();
}
// Despite the truncated leftover tmp file, checkpointing must recover.
secondary.doCheckpoint();
}
finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
// Always clear the injected fault so later tests are unaffected.
Mockito.reset(faultInjector);
}
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that the primary NN will not serve any files to a 2NN who doesn't
 * share its namespace ID, and also will not accept any files from one.
 */
@Test public void testNamespaceVerifiedOnFileTransfer() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    URL fsName = DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(), conf, DFSUtil.getHttpClientScheme(conf)).toURL();
    // Roll the logs so a finalized edits segment exists to download.
    nn.rollEditLog();
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
    RemoteEditLog log = manifest.getLogs().get(0);
    // Mock a destination storage whose StorageInfo (namespace/cluster ids)
    // deliberately does not match the primary NN's.
    NNStorage dstImage = Mockito.mock(NNStorage.class);
    Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))).when(dstImage).getFiles(Mockito.anyObject(), Mockito.anyString());
    File mockImageFile = File.createTempFile("image", "");
    // Fix: the temp file was never removed; make sure it does not outlive
    // the test JVM.
    mockImageFile.deleteOnExit();
    FileOutputStream imageFile = new FileOutputStream(mockImageFile);
    imageFile.write("data".getBytes());
    imageFile.close();
    Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
    Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString()).when(dstImage).toColonSeparatedString();
    // 1) Downloading an image must fail the storage-info check.
    try {
      TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    // 2) Downloading edits must fail the same check.
    try {
      TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    // 3) Uploading an image must be rejected as well.
    try {
      TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage, NameNodeFile.IMAGE, 0);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that, an attempt to lock a storage that is already locked by a nodename,
 * logs error message that includes JVM name of the namenode that locked it.
 */
@Test public void testStorageAlreadyLockedErrorMessage() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
StorageDirectory savedSd=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
// All dirs are already locked by the running NN; remember one of them.
for ( StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd=sd;
}
// Capture Storage's log output so the error message can be inspected.
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
try {
savedSd.lock();
fail("Namenode should not be able to lock a storage that is already locked");
}
catch ( IOException ioe) {
// On Windows the holder's JVM name is not included in the message.
String lockingJvmName=Path.WINDOWS ? "" : " " + ManagementFactory.getRuntimeMXBean().getName();
String expectedLogMessage="It appears that another namenode" + lockingJvmName + " has already locked the storage directory";
assertTrue("Log output does not contain expected log message: " + expectedLogMessage,logs.getOutput().contains(expectedLogMessage));
}
}
finally {
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests save namespace: -saveNamespace must be rejected outside safe mode,
 * succeed inside safe mode (finalizing the edit segment and writing an
 * image), and the saved state must survive a cluster restart.
 */
@Test public void testSaveNamespace() throws IOException {
  MiniDFSCluster cluster = null;
  DistributedFileSystem fs = null;
  FileContext fc;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    fs = (cluster.getFileSystem());
    fc = FileContext.getFileContext(cluster.getURI(0));
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[]{"-saveNamespace"};
    // Outside safe mode, -saveNamespace must be refused.
    try {
      admin.run(args);
    } catch (IOException eIO) {
      assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
    } catch (Exception e) {
      throw new IOException(e);
    }
    // Create a file and a symlink so there is namespace state to save.
    Path file = new Path("namespace.dat");
    DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize, replication, seed);
    checkFile(fs, file, replication);
    Path symlink = new Path("file.link");
    fc.createSymlink(file, symlink, false);
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    // Fix: declare the element type; a raw Collection cannot be iterated
    // with a typed enhanced-for variable.
    Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      // The in-progress segment must contain more than just the header.
      assertTrue(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(1)).length() > Integer.SIZE / Byte.SIZE);
    }
    // Inside safe mode, -saveNamespace must succeed.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      throw new IOException(e);
    }
    final int EXPECTED_TXNS_FIRST_SEG = 13;
    // saveNamespace finalizes the first segment and opens a new one.
    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      File curDir = new File(ed, "current");
      LOG.info("Files in " + curDir + ":\n  " + Joiner.on("\n  ").join(curDir.list()));
      File originalEdits = new File(curDir, NNStorage.getInProgressEditsFileName(1));
      assertFalse(originalEdits.exists());
      File finalizedEdits = new File(curDir, NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
      GenericTestUtils.assertExists(finalizedEdits);
      assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
      GenericTestUtils.assertExists(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
    }
    // An image at the finalized txid must exist in every image dir.
    Collection<URI> imageDirs = cluster.getNameDirs(0);
    for (URI uri : imageDirs) {
      File imageDir = new File(uri.getPath());
      File savedImage = new File(imageDir, "current/" + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
      assertTrue("Should have saved image at " + savedImage, savedImage.exists());
    }
    // Restart without reformatting: file and symlink must still resolve.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fs = (cluster.getFileSystem());
    checkFile(fs, file, replication);
    fc = FileContext.getFileContext(cluster.getURI(0));
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
  } finally {
    if (fs != null) fs.close();
    cleanup(cluster);
    cluster = null;
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that legacy OIV images are written on every checkpoint and that
 * only DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED of them are retained (the
 * oldest one is purged).
 */
@Test public void testLegacyOivImage() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File tmpDir = Files.createTempDir();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY, tmpDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, "2");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary = startSecondaryNameNode(conf);
    // One checkpoint -> exactly one legacy OIV image.
    secondary.doCheckpoint();
    String files1[] = tmpDir.list();
    assertEquals("Only one file is expected", 1, files1.length);
    // Two more checkpoints: with two retained, the first image (files1[0])
    // must have been purged.
    secondary.doCheckpoint();
    secondary.doCheckpoint();
    String files2[] = tmpDir.list();
    assertEquals("Two files are expected", 2, files2.length);
    for (String fName : files2) {
      assertFalse(fName.equals(files1[0]));
    }
  } finally {
    cleanup(secondary);
    cleanup(cluster);
    // Fix: File.delete() silently fails on a non-empty directory, so the
    // OIV images must be removed first or the temp dir leaks.
    File[] leftovers = tmpDir.listFiles();
    if (leftovers != null) {
      for (File leftover : leftovers) {
        leftover.delete();
      }
    }
    tmpDir.delete();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that the secondary namenode correctly deletes temporary edits
 * on startup.
 */
@Test(timeout=60000) public void testDeleteTemporaryEditsOnStartup() throws IOException {
Configuration conf=new HdfsConfiguration();
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
// Baseline checkpoint succeeds before any fault is injected.
secondary.doCheckpoint();
// Fail the next checkpoint just before the downloaded edits are renamed
// into place, leaving a *.tmp edits file behind.
Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
}
Mockito.reset(faultInjector);
// The failed checkpoint must have left exactly one tmp edits file in
// every edits directory.
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
}
// Restart the 2NN: its startup must remove the leftover tmp files.
secondary.shutdown();
secondary=startSecondaryNameNode(conf);
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Did not expect a tmp edits file in directory " + sd.toString(),tmpEdits.length == 0);
}
// Checkpointing must work normally after the cleanup.
secondary.doCheckpoint();
}
finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
// Always clear the injected fault so later tests are unaffected.
Mockito.reset(faultInjector);
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Formatting with -format -force against an existing name directory must
 * succeed (exit status 0) and generate a fresh cluster id.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormatWithForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args = {"-format", "-force"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded", 0, ee.status);
  }
  final String clusterId = getClusterId(config);
  final boolean gotId = clusterId != null && !clusterId.equals("");
  assertTrue("Didn't get new ClusterId", gotId);
}
BooleanVerifier
/**
 * Test namenode format with -format -clusterid and an empty clusterid.
 * Format should fail as no valid id was provided, printing the usage
 * message and leaving no VERSION file behind.
 * @throws IOException on unexpected I/O errors
 */
@Test public void testFormatWithEmptyClusterIdOption() throws IOException {
  String[] argv = {"-format", "-clusterid", ""};
  PrintStream origErr = System.err;
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream stdErr = new PrintStream(baos);
  System.setErr(stdErr);
  try {
    NameNode.createNameNode(argv, config);
    // The usage message goes to stderr when the clusterid is invalid.
    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
  } finally {
    // Fix: restore stderr even if the assertion above fails; otherwise
    // every later test in this JVM keeps writing to the captured stream.
    System.setErr(origErr);
  }
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Formatting with both -nonInteractive and -force against an existing name
 * directory must succeed without prompting.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormatWithNonInteractiveAndForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args = {"-format", "-nonInteractive", "-force"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded", 0, ee.status);
  }
  final String clusterId = getClusterId(config);
  final boolean gotId = clusterId != null && !clusterId.equals("");
  assertTrue("Didn't get new ClusterId", gotId);
}
BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format option when a non empty name directory
 * exists. Enter N when prompted and format should be aborted.
 * @throws IOException on unexpected I/O errors
 * @throws InterruptedException declared for API compatibility
 */
@Test public void testFormatWithoutForceEnterNo() throws IOException, InterruptedException {
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Answer "N" to the re-format prompt via a replaced stdin.
  InputStream origIn = System.in;
  ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
  System.setIn(bins);
  String[] argv = {"-format"};
  try {
    NameNode.createNameNode(argv, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should not have succeeded", 1, e.status);
  } finally {
    // Fix: restore stdin even when an assertion fails, so other tests in
    // this JVM are not left reading from the byte-array stream.
    System.setIn(origIn);
  }
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Formatting with plain -format when the name directory exists but is
 * empty must succeed and produce a fresh cluster id.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormatWithEmptyDir() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args = {"-format"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded", 0, ee.status);
  }
  final String clusterId = getClusterId(config);
  assertTrue("Didn't get new ClusterId", clusterId != null && !clusterId.equals(""));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Formatting with -format -nonInteractive when the name directory does not
 * exist yet must succeed without any prompt.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
  final String[] args = {"-format", "-nonInteractive"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded", 0, ee.status);
  }
  final String clusterId = getClusterId(config);
  final boolean gotId = clusterId != null && !clusterId.equals("");
  assertTrue("Didn't get new ClusterId", gotId);
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format option when a non empty name directory
 * exists. Enter Y when prompted and the format should succeed.
 * @throws IOException on unexpected I/O errors
 * @throws InterruptedException declared for API compatibility
 */
@Test public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Answer "Y" to the re-format prompt via a replaced stdin.
  InputStream origIn = System.in;
  ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
  System.setIn(bins);
  String[] argv = {"-format"};
  try {
    NameNode.createNameNode(argv, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  } finally {
    // Fix: restore stdin even when an assertion fails, so other tests in
    // this JVM are not left reading from the byte-array stream.
    System.setIn(origIn);
  }
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
BooleanVerifier
/**
 * Test namenode format with -clusterid -force option. Format command should
 * fail as no cluster id was provided ("-force" is consumed as the id's
 * value), printing the usage message and leaving no VERSION file.
 * @throws IOException on unexpected I/O errors
 */
@Test public void testFormatWithInvalidClusterIdOption() throws IOException {
  String[] argv = {"-format", "-clusterid", "-force"};
  PrintStream origErr = System.err;
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream stdErr = new PrintStream(baos);
  System.setErr(stdErr);
  try {
    NameNode.createNameNode(argv, config);
    // The usage message goes to stderr when the clusterid is invalid.
    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
  } finally {
    // Fix: restore stderr even if the assertion above fails; otherwise
    // every later test in this JVM keeps writing to the captured stream.
    System.setErr(origErr);
  }
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}
APIUtilityVerifier BooleanVerifier
/**
 * Tests formatting with and without an explicit cluster id.
 * NOTE(review): this relies on the JVM-global StartupOption.FORMAT state;
 * it resets the id to "" at the end so later formats generate fresh ids.
 */
@Test public void testFormatClusterIdOption() throws IOException {
// Format without specifying a cluster id: a fresh one must be generated.
NameNode.format(config);
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
// Format with an explicit cluster id: it must be used verbatim.
StartupOption.FORMAT.setClusterId("mycluster");
NameNode.format(config);
cid=getClusterId(config);
assertTrue("ClusterId didn't match",cid.equals("mycluster"));
// An empty cluster id behaves like "none given": a new id is generated.
StartupOption.FORMAT.setClusterId("");
NameNode.format(config);
String newCid=getClusterId(config);
assertFalse("ClusterId should not be the same",newCid.equals(cid));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Plain -format with no pre-existing name directory must succeed and
 * generate a fresh cluster id.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormat() throws IOException {
  final String[] args = {"-format"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded", 0, ee.status);
  }
  final String clusterId = getClusterId(config);
  final boolean gotId = clusterId != null && !clusterId.equals("");
  assertTrue("Didn't get new ClusterId", gotId);
}
BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Formatting with -format -nonInteractive against a non-empty name
 * directory must abort with exit status 1 and leave no VERSION file.
 * @throws IOException on unexpected cluster I/O errors
 */
@Test
public void testFormatWithNonInteractive() throws IOException {
  final File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  final String[] args = {"-format", "-nonInteractive"};
  try {
    NameNode.createNameNode(args, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException ee) {
    // Non-interactive mode must refuse to clobber existing data.
    assertEquals("Format should have been aborted with exit code 1", 1, ee.status);
  }
  final File versionFile = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", versionFile.exists());
}
BooleanVerifier
/**
 * Test namenode format with -format -clusterid options. Format should fail
 * as no clusterid was sent, printing the usage message and leaving no
 * VERSION file behind.
 * @throws IOException on unexpected I/O errors
 */
@Test public void testFormatWithNoClusterIdOption() throws IOException {
  String[] argv = {"-format", "-clusterid"};
  PrintStream origErr = System.err;
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream stdErr = new PrintStream(baos);
  System.setErr(stdErr);
  try {
    NameNode.createNameNode(argv, config);
    // The usage message goes to stderr when the clusterid is missing.
    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
  } finally {
    // Fix: restore stderr even if the assertion above fails; otherwise
    // every later test in this JVM keeps writing to the captured stream.
    System.setErr(origErr);
  }
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}
BooleanVerifier
/**
 * Verify a DN remains in DECOMMISSION_INPROGRESS state if it is marked
 * as dead before decommission has completed. That will allow DN to resume
 * the replication process after it rejoins the cluster.
 */
@Test(timeout=120000) public void testDecommissionStatusAfterDNRestart() throws IOException, InterruptedException {
  DistributedFileSystem fileSys = (DistributedFileSystem) cluster.getFileSystem();
  // Create a single-replica file so exactly one DN hosts its block.
  Path f = new Path("decommission.dat");
  DFSTestUtil.createFile(fileSys, f, fileSize, fileSize, fileSize, (short) 1, seed);
  // Fix: declare the element types; raw RemoteIterator/List lose the typed
  // access (e.g. dead.get(0).isDecommissioned()) below.
  RemoteIterator<LocatedFileStatus> fileList = fileSys.listLocatedStatus(f);
  BlockLocation[] blockLocations = fileList.next().getBlockLocations();
  String dnName = blockLocations[0].getNames()[0];
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  // Start decommissioning the DN that holds the block, then stop it
  // before decommission can complete.
  decommissionNode(fsn, localFileSys, dnName);
  dm.refreshNodes(conf);
  DataNodeProperties dataNodeProperties = cluster.stopDataNode(dnName);
  // Wait until the NN considers the stopped DN dead; bounded by the
  // @Test timeout.
  final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  while (true) {
    dm.fetchDatanodes(null, dead, false);
    if (dead.size() == 1) {
      break;
    }
    Thread.sleep(1000);
  }
  BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
  BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
  // The dead DN must still be DECOMMISSION_INPROGRESS, not DECOMMISSIONED.
  assertTrue("the node is in decommissioned state ", !dead.get(0).isDecommissioned());
  // Restart the DN and undo the exclusion so the cluster is left clean.
  cluster.restartDataNode(dataNodeProperties, true);
  cluster.waitActive();
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
  cleanupFile(fileSys, f);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the quota can be correctly updated for append
 */
@Test(timeout=60000) public void testUpdateQuotaForAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(foo, "bar");
  long currentFileLen = BLOCKSIZE;
  DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  // Append half a block.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
  currentFileLen += (BLOCKSIZE / 2);
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  checkQuotaConsumedForAppend(fooNode, foo, currentFileLen);
  // Append a whole block.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  currentFileLen += BLOCKSIZE;
  checkQuotaConsumedForAppend(fooNode, foo, currentFileLen);
  // Append several blocks plus a fraction of a block.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
  checkQuotaConsumedForAppend(fooNode, foo, currentFileLen);
}

/**
 * Asserts the quota usage recorded on {@code dirNode} after an append:
 * namespace usage stays at 2 (the dir plus the single file), diskspace
 * usage equals fileLen * REPLICATION, and the content summary agrees.
 */
private void checkQuotaConsumedForAppend(INodeDirectory dirNode, Path dirPath, long fileLen) throws IOException {
  Quota.Counts quota = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.get(Quota.NAMESPACE);
  long ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  assertEquals(fileLen * REPLICATION, ds);
  ContentSummary c = dfs.getContentSummary(dirPath);
  assertEquals(c.getSpaceConsumed(), ds);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that namespace and diskspace quota usage are correctly charged
 * when a new multi-block file is created under a quota-enabled directory.
 */
@Test(timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
  final Path quotaDir = new Path(dir, "foo");
  final Path newFile = new Path(quotaDir, "created_file.data");
  dfs.mkdirs(quotaDir);
  dfs.setQuota(quotaDir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  final long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, newFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
  final INode dirInode = fsdir.getINode4Write(quotaDir.toString());
  assertTrue(dirInode.isDirectory());
  assertTrue(dirInode.isQuotaSet());
  final Quota.Counts counts = dirInode.asDirectory().getDirectoryWithQuotaFeature().getSpaceConsumed();
  // One namespace entry for the directory plus one for the file.
  assertEquals(2, counts.get(Quota.NAMESPACE));
  assertEquals(fileLen * REPLICATION, counts.get(Quota.DISKSPACE));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. One of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test public void testLoadingWithGaps() throws IOException {
  File f1 = new File(TEST_DIR + "/gaptest0");
  // Fix: declare the element type instead of using a raw List.
  List<URI> editUris = ImmutableList.of(f1.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // Delete the finalized segment for the second roll, leaving a txid gap.
  final long startGapTxId = 1 * TXNS_PER_ROLL + 1;
  final long endGapTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override public boolean accept(File dir, String name) {
      // Idiom fix: return the condition directly instead of
      // "if (cond) return true; return false;".
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId));
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  // With the only copy of the segment gone, stream selection must fail.
  try {
    editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    fail("Should have thrown exception");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Gap in transactions. Expected to be able to read up until " + "at least txid 40 but unable to find any edit logs containing " + "txid 11", ioe);
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test edit log failover. If a single edit log is missing, other
 * edits logs should be used instead.
 */
@Test public void testEditLogFailOverFromMissing() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  // Fix: declare the element type instead of using a raw List.
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // Delete one directory's copy of the second segment; the copy in f2
  // must be used instead.
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override public boolean accept(File dir, String name) {
      // Idiom fix: return the condition directly.
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId));
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection<EditLogInputStream> streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // Fix: guard against streams being null when selectInputStreams threw
    // before assignment — the old code would NPE here and mask the real
    // failure reported by fail() above.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the trailing 4-byte checksum of each finalized edits file and
 * verifies that the namenode refuses to start, failing with a
 * ChecksumException as the cause.
 */
@Test public void testEditChecksum() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  FSImage fsimage = namesystem.getFSImage();
  final FSEditLog editLog = fsimage.getEditLog();
  fileSys.mkdirs(new Path("/tmp"));
  // Fix: declare the element types; a raw Iterator/LinkedList cannot feed
  // the typed enhanced-for loop below.
  // Snapshot the edits directories before shutting the cluster down.
  Iterator<StorageDirectory> iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
  LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();
  // Flip one bit in the last 4 bytes (the checksum) of each finalized
  // segment covering txids 1-3.
  for (StorageDirectory sd : sds) {
    File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
    assertTrue(editFile.exists());
    long fileLen = editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen - 4);
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
    rwf.close();
  }
  // Restarting on the corrupted logs must fail with a ChecksumException.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    assertNotNull("Cause of exception should be ChecksumException", e.getCause());
    assertEquals("Cause of exception should be ChecksumException", ChecksumException.class, e.getCause().getClass());
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * EditLogFileOutputStream must tolerate a second close() on an already
 * closed stream without throwing NullPointerException (HDFS-2011). If the
 * second close() throws IOException at all, it must be the documented
 * "aborted output stream" error.
 */
@Test
public void testEditLogFileOutputStreamCloseClose() throws IOException {
  final EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
  stream.close();
  try {
    stream.close();
  } catch (IOException e) {
    final String trace = StringUtils.stringifyException(e);
    assertTrue(trace, trace.contains("Trying to use aborted output stream"));
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * When every edits directory fails on write, the NN has no journal left to
 * sync to and must abort via ExitException.
 */
@Test public void testAllEditsDirFailOnWrite() throws IOException {
// Sanity check: edits work before any directory is failed.
assertTrue(doAnEdit());
// Fail both edits directories on write (and on setReadyToFlush).
invalidateEditsDirAtIndex(0,true,true);
invalidateEditsDirAtIndex(1,true,true);
try {
doAnEdit();
fail("The previous edit could not be synced to any persistent storage, " + " should have halted the NN");
}
catch ( RemoteException re) {
// The abort surfaces through RPC as an ExitException.
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "No journals available to flush. " + "Unsynced transactions: 1",re);
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * A failure of a *required* journal must halt the NN even when other
 * journals are healthy, and must do so before the non-required journal is
 * asked to flush.
 */
@Test public void testSingleRequiredFailedEditsDirOnSetReadyToFlush() throws IOException {
// Restart the cluster with the first name dir marked as required, and
// with the minimum-journal checks relaxed so only the required-journal
// rule can trigger an abort.
String[] editsDirs=cluster.getConfiguration(0).getTrimmedStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
shutDownMiniCluster();
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,editsDirs[0]);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
setUpMiniCluster(conf,true);
assertTrue(doAnEdit());
// Fail the required journal during setReadyToFlush.
invalidateEditsDirAtIndex(0,false,false);
JournalAndStream nonRequiredJas=getJournalAndStream(1);
EditLogFileOutputStream nonRequiredSpy=spyOnStream(nonRequiredJas);
assertTrue(nonRequiredJas.isActive());
try {
doAnEdit();
fail("A single failure of a required journal should have halted the NN");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains("setReadyToFlush failed for required journal",re);
}
// Because the required journal failed first, the non-required journal
// must never have been asked to flush, and must end up inactive.
Mockito.verify(nonRequiredSpy,Mockito.never()).setReadyToFlush();
assertFalse(nonRequiredJas.isActive());
}
BooleanVerifier
/**
 * A flush failure in a single redundant edits directory must be tolerated:
 * later edits still succeed and the NN stays out of safe mode.
 */
@Test public void testSingleFailedEditsDirOnFlush() throws IOException {
assertTrue(doAnEdit());
// Fail flush (but not setReadyToFlush) on journal 0 only.
invalidateEditsDirAtIndex(0,true,false);
assertTrue(doAnEdit());
assertFalse(cluster.getNameNode().isInSafeMode());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * A flush failure of ALL edits directories must halt the NameNode: the
 * next edit fails with an ExitException wrapped in a RemoteException.
 */
@Test public void testAllEditsDirsFailOnFlush() throws IOException {
assertTrue(doAnEdit());
// Fail flush on both journals.
invalidateEditsDirAtIndex(0,true,false);
invalidateEditsDirAtIndex(1,true,false);
try {
doAnEdit();
fail("The previous edit could not be synced to any persistent storage, " + "should have halted the NN");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage. " + "Unsynced transactions: 1",re);
}
}
BooleanVerifier
/**
 * A setReadyToFlush failure in a single redundant edits directory must be
 * tolerated: later edits still succeed and the NN stays out of safe mode.
 */
@Test public void testSingleFailedEditsDirOnSetReadyToFlush() throws IOException {
assertTrue(doAnEdit());
// Fail setReadyToFlush on journal 0 only.
invalidateEditsDirAtIndex(0,false,false);
assertTrue(doAnEdit());
assertFalse(cluster.getNameNode().isInSafeMode());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * With four redundant edits dirs and a minimum of two, the NN must survive
 * the first two setReadyToFlush failures but halt on the third, which
 * drops the healthy count below the configured minimum.
 */
@Test public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush() throws IOException {
// Rebuild the cluster with four fresh name dirs and a redundancy minimum of 2.
shutDownMiniCluster();
Configuration conf=new HdfsConfiguration();
String[] nameDirs=new String[4];
for (int i=0; i < nameDirs.length; i++) {
File nameDir=new File(PathUtils.getTestDir(getClass()),"name-dir" + i);
nameDir.mkdirs();
nameDirs[i]=nameDir.getAbsolutePath();
}
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,StringUtils.join(nameDirs,","));
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,2);
setUpMiniCluster(conf,false);
// Each edit after a failure confirms the NN is still alive; the third
// journal failure (leaving only one healthy) must be fatal.
assertTrue(doAnEdit());
invalidateEditsDirAtIndex(0,false,false);
assertTrue(doAnEdit());
invalidateEditsDirAtIndex(1,false,false);
assertTrue(doAnEdit());
invalidateEditsDirAtIndex(2,false,false);
try {
doAnEdit();
fail("A failure of more than the minimum number of redundant journals " + "should have halted ");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "setReadyToFlush failed for too many journals. " + "Unsynced transactions: 1",re);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests rolling edit logs while transactions are ongoing: background
 * workers keep generating edits while the main thread repeatedly rolls the
 * log, verifying each finalized segment and the next in-progress file.
 */
@Test public void testEditLogRolling() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Typed reference: a raw AtomicReference makes get() return Object, which
// does not match the RuntimeException(Throwable) constructor used below.
AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
// Workers generate edits concurrently; any failure lands in caughtErr.
startTransactionWorkers(namesystem,caughtErr);
long previousLogTxId=1;
for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig=namesystem.rollEditLog();
long nextLog=sig.curSegmentTxId;
// The just-finalized segment must cover [previousLogTxId, nextLog - 1].
String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
assertEquals(previousLogTxId,nextLog);
// A new in-progress segment should now exist at the next txid.
File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
}
}
finally {
stopTransactionWorkers();
// Re-throw any error a worker thread captured so the test fails loudly.
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing so
 * that other threads can concurrently enqueue edits while the prior sync
 * is ongoing. This test checks that the log is saved correctly if the
 * saveNamespace occurs while the syncing thread is in the unsynchronized
 * middle section. It replicates the following manual test proposed by
 * Konstantin:
 * Start the name-node in a debugger, do -mkdir and stop the debugger in
 * logSync() just before it flushes. Enter safe mode with another client,
 * start saveNamespace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create()
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then let logSync() run and terminate the name-node. Without the fix the
 * name-node won't restart, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Spy on the first journal's stream so flush() can be intercepted.
// NOTE(review): AtomicReference/Answer below are raw types — presumably
// <Throwable>/<Void> before generics were stripped; verify against trunk.
JournalAndStream jas=editLog.getJournals().get(0);
EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
// Edit thread: performs a mkdirs whose logSync will be stalled in flush.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
waitToEnterFlush.countDown();
}
}
}
;
// Stall flush() only when invoked from the edit thread, signalling the
// main thread first so it can race saveNamespace against the sync.
Answer blockingFlush=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
}
;
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the stalled sync to finish.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Txns 1-3 are finalized by the roll; the new segment starts at 4.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Most of the FSNamesystem methods have a synchronized section where they
 * update the name system itself and write to the edit log, and then,
 * unsynchronized, they call logSync. This test verifies that, if an
 * operation has written to the edit log but not yet synced it, we wait
 * for that sync before entering safe mode.
 */
@Test public void testSaveRightBeforeSync() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
// Spy the edit log so logSync() itself can be stalled.
// NOTE(review): AtomicReference/Answer below are raw types — presumably
// <Throwable>/<Void> before generics were stripped; verify against trunk.
FSEditLog editLog=spy(fsimage.getEditLog());
fsimage.editLog=editLog;
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterSync=new CountDownLatch(1);
// Edit thread: performs a mkdirs whose logSync will be stalled.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
waitToEnterSync.countDown();
}
}
}
;
// Stall logSync() only when invoked from the edit thread, signalling the
// main thread first so it can race safe-mode entry against the pending sync.
Answer blockingSync=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
}
;
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the pending sync to complete.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Txns 1-3 are finalized by the roll; the new segment starts at 4.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDoubleBuffer() throws IOException {
EditsDoubleBuffer buf=new EditsDoubleBuffer(1024);
assertTrue(buf.isFlushed());
byte[] data=new byte[100];
buf.writeRaw(data,0,data.length);
assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes());
assertTrue("Writing to current buffer should not affect flush state",buf.isFlushed());
buf.setReadyToFlush();
assertEquals("Swapping buffers should still count buffered bytes",data.length,buf.countBufferedBytes());
assertFalse(buf.isFlushed());
DataOutputBuffer outBuf=new DataOutputBuffer();
buf.flushTo(outBuf);
assertEquals(data.length,outBuf.getLength());
assertTrue(buf.isFlushed());
assertEquals(0,buf.countBufferedBytes());
buf.writeRaw(data,0,data.length);
assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes());
buf.setReadyToFlush();
buf.flushTo(outBuf);
assertEquals(data.length * 2,outBuf.getLength());
assertEquals(0,buf.countBufferedBytes());
outBuf.close();
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises error handling of FSDirectory.setINodeXAttrs: duplicate xattrs
 * in one request, CREATE on an existing xattr, REPLACE on a missing xattr,
 * and finally successful CREATE, REPLACE, and CREATE|REPLACE batches.
 */
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception {
// Parameterized lists: raw List would not compile against the typed
// for-each and getValue() calls below.
List<XAttr> existingXAttrs=Lists.newArrayList();
List<XAttr> toAdd=Lists.newArrayList();
// Duplicate entry (index 0 twice) must be rejected.
toAdd.add(generatedXAttrs.get(0));
toAdd.add(generatedXAttrs.get(1));
toAdd.add(generatedXAttrs.get(2));
toAdd.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Specified the same xattr to be set twice");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e);
}
// CREATE of an already-existing xattr must be rejected.
toAdd.remove(generatedXAttrs.get(0));
existingXAttrs.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Set XAttr that is already set without REPLACE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("already exists",e);
}
// REPLACE of a missing xattr must be rejected.
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
fail("Set XAttr that does not exist without the CREATE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("does not exist",e);
}
// Sanity: a clean CREATE of the two remaining xattrs succeeds.
toAdd.remove(generatedXAttrs.get(0));
List<XAttr> newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
assertEquals("Unexpected toAdd size",2,toAdd.size());
for ( XAttr x : toAdd) {
assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x));
}
// REPLACE existing xattrs with new values and verify the values took.
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 3; i++) {
XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build();
toAdd.add(xAttr);
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size());
for (int i=0; i < 3; i++) {
assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue());
}
// Mixed CREATE|REPLACE batch: all four xattrs must be present afterwards.
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 4; i++) {
toAdd.add(generatedXAttrs.get(i));
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
verifyXAttrsPresent(newXAttrs,4);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Dump the tree, make some changes, and then dump the tree again.
 * Re-parses the recursive dump line by line, verifying that every
 * non-snapshot entry carries a recognized tree-item prefix and a
 * well-formed class name.
 */
@Test public void testDumpTree() throws Exception {
  final INode root=fsdir.getINode("/");
  LOG.info("Original tree");
  final StringBuffer b1=root.dumpTreeRecursively();
  System.out.println("b1=" + b1);
  final BufferedReader reader=new BufferedReader(new StringReader(b1.toString()));
  // The first line names the root's class.
  String current=reader.readLine();
  checkClassName(current);
  while ((current=reader.readLine()) != null) {
    current=current.trim();
    // Blank and snapshot lines carry no tree-item prefix; skip them.
    if (current.isEmpty() || current.contains("snapshot")) {
      continue;
    }
    assertTrue("line=" + current,current.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM) || current.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
    checkClassName(current);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Corrupt the tail of an edit log with bogus OP_DELETE opcodes and verify
 * that the NameNode refuses to restart, reporting the recent opcode
 * offsets in its error message.
 */
@Test public void testDisplayRecentEditLogOpCodes() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
// Generate some edits so the log has content to corrupt.
for (int i=0; i < 20; i++) {
fileSys.mkdirs(new Path("/tmp/tmp" + i));
}
StorageDirectory sd=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
cluster.shutdown();
File editFile=FSImageTestUtil.findLatestEditsLog(sd).getFile();
assertTrue("Should exist: " + editFile,editFile.exists());
// Overwrite the last 40 bytes with OP_DELETE opcodes to corrupt the log.
long fileLen=editFile.length();
RandomAccessFile rwf=new RandomAccessFile(editFile,"rw");
try {
rwf.seek(fileLen - 40);
for (int i=0; i < 20; i++) {
rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
}
}
finally {
// Release the file handle even if a seek/write throws (was leaked before).
rwf.close();
}
StringBuilder bld=new StringBuilder();
bld.append("^Error replaying edit log at offset \\d+. ");
bld.append("Expected transaction ID was \\d+\n");
bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).format(false).build();
fail("should not be able to start");
}
catch ( IOException e) {
assertTrue("error message contains opcodes message",e.getMessage().matches(bld.toString()));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An edit log truncated to just its 8-byte header must validate with no
 * corrupt header and an INVALID_TXID end transaction id.
 */
@Test public void testValidateEmptyEditLog() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEmptyEditLog");
// Parameterized map (offset -> txid); raw SortedMap forfeits type safety.
SortedMap<Long, Long> offsetToTxId=Maps.newTreeMap();
File logFile=prepareUnfinalizedTestEditLog(testDir,0,offsetToTxId);
// Truncate the file down to the bare log header.
truncateFile(logFile,8);
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertFalse(validation.hasCorruptHeader());
assertEquals(HdfsConstants.INVALID_TXID,validation.getEndTxId());
}
APIUtilityVerifier BooleanVerifier
/**
 * Smashing the first 8 bytes (the layout-version header) of an edit log
 * must make validation report a corrupt header.
 */
@Test public void testValidateEditLogWithCorruptHeader() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptHeader");
// Parameterized map (offset -> txid); raw SortedMap forfeits type safety.
SortedMap<Long, Long> offsetToTxId=Maps.newTreeMap();
File logFile=prepareUnfinalizedTestEditLog(testDir,2,offsetToTxId);
RandomAccessFile rwf=new RandomAccessFile(logFile,"rw");
try {
// Clobber the header with an arbitrary long.
rwf.seek(0);
rwf.writeLong(42);
}
finally {
rwf.close();
}
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertTrue(validation.hasCorruptHeader());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupt or truncate each transaction in an edit log in turn and verify
 * that validation reports the expected end transaction id each time,
 * never a corrupt header.
 */
@Test public void testValidateEditLogWithCorruptBody() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptBody");
// Parameterized map (offset -> txid); the raw form does not compile
// against the long assignments from entry.getKey()/getValue() below.
SortedMap<Long, Long> offsetToTxId=Maps.newTreeMap();
final int NUM_TXNS=20;
File logFile=prepareUnfinalizedTestEditLog(testDir,NUM_TXNS,offsetToTxId);
// Keep a pristine copy so each corruption starts from a clean log.
File logFileBak=new File(testDir,logFile.getName() + ".bak");
Files.copy(logFile,logFileBak);
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertFalse(validation.hasCorruptHeader());
assertEquals(NUM_TXNS + 1,validation.getEndTxId());
// Flip one byte of each txn's opcode: only corrupting the final txn
// (NUM_TXNS + 1) lowers the reported end txid.
for ( Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
corruptByteInFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1);
assertEquals("Failed when corrupting txn opcode at " + txOffset,expectedEndTxId,validation.getEndTxId());
assertFalse(validation.hasCorruptHeader());
}
// Truncate at each txn boundary: the end txid drops to the prior txn.
for ( Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
truncateFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == 0) ? HdfsConstants.INVALID_TXID : (txId - 1);
assertEquals("Failed when corrupting txid " + txId + " txn opcode "+ "at "+ txOffset,expectedEndTxId,validation.getEndTxId());
assertFalse(validation.hasCorruptHeader());
}
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Simple test with image, edits, and inprogress edits: the inspector must
 * find both images, pick the one with the highest txid (456) as latest,
 * and report the storage as upgrade-finalized.
 */
@Test public void testCurrentStorageInspector() throws IOException {
FSImageTransactionalStorageInspector inspector=new FSImageTransactionalStorageInspector();
// Mock directory containing: image@123, finalized edits 123-456,
// image@456, and an in-progress segment starting at 457.
StorageDirectory mockDir=FSImageTestUtil.mockStorageDirectory(NameNodeDirType.IMAGE_AND_EDITS,false,"/foo/current/" + getImageFileName(123),"/foo/current/" + getFinalizedEditsFileName(123,456),"/foo/current/" + getImageFileName(456),"/foo/current/" + getInProgressEditsFileName(457));
inspector.inspectDirectory(mockDir);
assertEquals(2,inspector.foundImages.size());
FSImageFile latestImage=inspector.getLatestImages().get(0);
assertEquals(456,latestImage.txId);
// The latest image must come from the same (mocked) storage directory.
assertSame(mockDir,latestImage.sd);
assertTrue(inspector.isUpgradeFinalized());
assertEquals(new File("/foo/current/" + getImageFileName(456)),latestImage.getFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test when there is a snapshot taken on root: the snapshot must survive
 * two full save/restart cycles, keeping its diff list, snapshottable-dir
 * count, and listing entry intact.
 */
@Test public void testSnapshotOnRoot() throws Exception {
final Path root=new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root,"s1");
// Restart to reload the snapshot from the edit log.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// Save a new checkpoint, then restart to reload it from the image.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// Typed list: a raw List would not compile against getSnapshotId() below.
List<DirectoryDiff> diffList=rootNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
assertEquals(root,sdirs[0].getFullPath());
// One more save/restart cycle to make sure the reloaded state re-saves.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
}
InternalCallVerifier BooleanVerifier
/**
 * FSNamesystem.clear() must wipe the loaded-image state: the image-loaded
 * flag drops, the root directory becomes empty, and a subsequent
 * imageLoadComplete() marks the image as loaded again.
 */
@Test public void testReset() throws Exception {
  final Configuration config=new Configuration();
  final FSImage mockImage=Mockito.mock(FSImage.class);
  final FSEditLog mockEditLog=Mockito.mock(FSEditLog.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
  final FSNamesystem namesystem=new FSNamesystem(config,mockImage);
  namesystem.imageLoadComplete();
  assertTrue(namesystem.isImageLoaded());
  // clear() drops the flag and empties the directory tree.
  namesystem.clear();
  assertFalse(namesystem.isImageLoaded());
  final INodeDirectory root=(INodeDirectory)namesystem.getFSDirectory().getINode("/");
  assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  namesystem.imageLoadComplete();
  assertTrue(namesystem.isImageLoaded());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Replication queues must NOT be populated during the very first (startup)
 * safe mode, must be populated after leaving it, and must stay populated
 * when safe mode is entered again later.
 */
@Test public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
Configuration conf=new Configuration();
FSEditLog fsEditLog=Mockito.mock(FSEditLog.class);
FSImage fsImage=Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsNamesystem=new FSNamesystem(conf,fsImage);
FSNamesystem fsn=Mockito.spy(fsNamesystem);
// Mock an HA context whose state always allows populating repl queues,
// so any suppression observed below comes from safe-mode logic alone.
HAContext haContext=Mockito.mock(HAContext.class);
HAState haState=Mockito.mock(HAState.class);
Mockito.when(haContext.getState()).thenReturn(haState);
Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
Whitebox.setInternalState(fsn,"haContext",haContext);
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
// First safe mode: queues must stay unpopulated.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues were being populated during very first " + "safemode",!fsn.isPopulatingReplQueues());
fsn.leaveSafeMode();
assertTrue("FSNamesystem didn't leave safemode",!fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated even after leaving " + "safemode",fsn.isPopulatingReplQueues());
// Second safe mode: queues must remain populated.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated after entering " + "safemode 2nd time",fsn.isPopulatingReplQueues());
}
InternalCallVerifier BooleanVerifier
/**
 * The "dfs.namenode.fslock.fair" setting must control whether the
 * namesystem lock is constructed in fair mode.
 */
@Test public void testFsLockFairness() throws IOException, InterruptedException {
  final Configuration config=new Configuration();
  final FSEditLog mockEditLog=Mockito.mock(FSEditLog.class);
  final FSImage mockImage=Mockito.mock(FSImage.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
  // Fair lock requested.
  config.setBoolean("dfs.namenode.fslock.fair",true);
  FSNamesystem namesystem=new FSNamesystem(config,mockImage);
  assertTrue(namesystem.getFsLockForTests().isFair());
  // Unfair lock requested.
  config.setBoolean("dfs.namenode.fslock.fair",false);
  namesystem=new FSNamesystem(config,mockImage);
  assertFalse(namesystem.getFsLockForTests().isFair());
}
InternalCallVerifier BooleanVerifier
/**
 * Leaving safe mode must clear both the startup and generic safe-mode
 * flags; entering safe mode for low resources must set the generic flag
 * without reporting startup safe mode.
 */
@Test public void testStartupSafemode() throws IOException {
  final Configuration config=new Configuration();
  final FSImage mockImage=Mockito.mock(FSImage.class);
  final FSEditLog mockEditLog=Mockito.mock(FSEditLog.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
  final FSNamesystem namesystem=new FSNamesystem(config,mockImage);
  namesystem.leaveSafeMode();
  assertFalse("After leaving safemode FSNamesystem.isInStartupSafeMode still " + "returned true",namesystem.isInStartupSafeMode());
  assertFalse("After leaving safemode FSNamesystem.isInSafeMode still returned" + " true",namesystem.isInSafeMode());
  // true == entering due to low resources, which is not startup safe mode.
  namesystem.enterSafeMode(true);
  assertFalse("After entering safemode due to low resources FSNamesystem." + "isInStartupSafeMode still returned true",namesystem.isInStartupSafeMode());
  assertTrue("After entering safemode due to low resources FSNamesystem." + "isInSafeMode still returned false",namesystem.isInSafeMode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FSNamesystemLock must behave like a reentrant read-write lock: hold
 * counts track nested acquisitions on both sides, and write ownership is
 * reported only while the write lock is actually held.
 */
@Test public void testFSNamesystemLockCompatibility(){
  final FSNamesystemLock lock=new FSNamesystemLock(true);
  // Read side: nested lock/unlock pairs adjust the hold count by one each.
  assertEquals(0,lock.getReadHoldCount());
  lock.readLock().lock();
  assertEquals(1,lock.getReadHoldCount());
  lock.readLock().lock();
  assertEquals(2,lock.getReadHoldCount());
  lock.readLock().unlock();
  assertEquals(1,lock.getReadHoldCount());
  lock.readLock().unlock();
  assertEquals(0,lock.getReadHoldCount());
  // Write side: ownership flag mirrors a non-zero hold count.
  assertFalse(lock.isWriteLockedByCurrentThread());
  assertEquals(0,lock.getWriteHoldCount());
  lock.writeLock().lock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(1,lock.getWriteHoldCount());
  lock.writeLock().lock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(2,lock.getWriteHoldCount());
  lock.writeLock().unlock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(1,lock.getWriteHoldCount());
  lock.writeLock().unlock();
  assertFalse(lock.isWriteLockedByCurrentThread());
  assertEquals(0,lock.getWriteHoldCount());
}
InternalCallVerifier BooleanVerifier
/**
 * JMX MBean calls must complete even while another thread holds the
 * FSNamesystem write lock: the MBean client started here must succeed
 * within 20 seconds despite the lock being held by this thread.
 */
@Test public void testWithFSNamesystemWriteLock() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
FSNamesystem fsn=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fsn=cluster.getNameNode().namesystem;
// Hold the write lock for the whole duration of the MBean query.
fsn.writeLock();
MBeanClient client=new MBeanClient();
client.start();
client.join(20000);
// NOTE(review): the message reads as if JMX calls *should* be blocked,
// but the assertion requires the client to have succeeded — the message
// appears inverted relative to the condition; confirm intent upstream.
assertTrue("JMX calls are blocked when FSNamesystem's writerlock" + "is owned by another thread",client.succeeded);
client.interrupt();
}
finally {
// Release the write lock before shutdown if we still own it.
if (fsn != null && fsn.hasWriteLock()) {
fsn.writeUnlock();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * The FSNamesystemState MXBean must expose snapshot statistics that agree
 * with the live namesystem counters, and a non-null Long value for
 * PendingDeletionBlocks.
 */
@Test public void test() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNameNode().namesystem;
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
// SnapshotStats is published as a JSON string; parse and cross-check it
// against the namesystem's own counters.
String snapshotStats=(String)(mbs.getAttribute(mxbeanName,"SnapshotStats"));
@SuppressWarnings("unchecked") Map stat=(Map)JSON.parse(snapshotStats);
assertTrue(stat.containsKey("SnapshottableDirectories") && (Long)stat.get("SnapshottableDirectories") == fsn.getNumSnapshottableDirs());
assertTrue(stat.containsKey("Snapshots") && (Long)stat.get("Snapshots") == fsn.getNumSnapshots());
Object pendingDeletionBlocks=mbs.getAttribute(mxbeanName,"PendingDeletionBlocks");
assertNotNull(pendingDeletionBlocks);
assertTrue(pendingDeletionBlocks instanceof Long);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * End-to-end favored-nodes check: every block of every created file must
 * land exactly on the datanodes passed as favored nodes to create().
 */
@Test(timeout=180000) public void testFavoredNodesEndToEnd() throws Exception {
  for (int i=0; i < NUM_FILES; i++) {
    // A differently-seeded RNG per file picks a fresh favored-node set.
    final Random rng=new Random(System.currentTimeMillis() + i);
    final InetSocketAddress[] favoredNodes=getDatanodes(rng);
    final Path p=new Path("/filename" + i);
    final FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,(short)3,4096L,null,favoredNodes);
    out.write(SOME_BYTES);
    out.close();
    // The favored set is fixed per file, so format it once up front.
    final String[] expectedHosts=getStringForInetSocketAddrs(favoredNodes);
    for ( BlockLocation loc : getBlockLocations(p)) {
      assertTrue(compareNodes(loc.getNames(),expectedHosts));
    }
  }
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Create a file with favored nodes where one favored datanode is
 * decommissioned: the write must still reach full replication, and the
 * decommissioned node (index 0) must not host any replica.
 */
@Test(timeout=180000) public void testWhenSomeNodesAreNotGood() throws Exception {
  // Gather transfer addresses (and host:port strings) of the first four nodes.
  final InetSocketAddress[] favoredAddrs=new InetSocketAddress[4];
  final String[] favoredHosts=new String[favoredAddrs.length];
  for (int i=0; i < favoredAddrs.length; i++) {
    favoredAddrs[i]=datanodes.get(i).getXferAddress();
    favoredHosts[i]=favoredAddrs[i].getAddress().getHostAddress() + ":" + favoredAddrs[i].getPort();
  }
  // Decommission the first favored node so the placement must skip it.
  final DatanodeInfo excluded=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeByXferAddr(favoredAddrs[0].getAddress().getHostAddress(),favoredAddrs[0].getPort());
  excluded.setDecommissioned();
  final Path p=new Path("/filename-foo-bar-baz");
  final short replication=(short)3;
  final FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,replication,4096L,null,favoredAddrs);
  out.write(SOME_BYTES);
  out.close();
  excluded.stopDecommission();
  final BlockLocation[] locations=getBlockLocations(p);
  Assert.assertEquals(replication,locations[0].getNames().length);
  for (int i=0; i < replication; i++) {
    final String loc=locations[0].getNames()[i];
    // Linear scan of the favored-host list for this replica's location.
    int j=0;
    while (j < favoredHosts.length && !loc.equals(favoredHosts[j])) {
      j++;
    }
    // j == 0 would mean the decommissioned node received a replica.
    Assert.assertTrue("j=" + j,j > 0);
    Assert.assertTrue("loc=" + loc + " not in host list "+ Arrays.asList(favoredHosts)+ ", j="+ j,j < favoredHosts.length);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them: with 10 rolls (100 finalized txns) plus an in-progress segment,
 * reading without in-progress streams must never yield a txid above 100.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// 10 rolls => txns 1-100 finalized, plus one in-progress segment.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,false);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
assertEquals(100,getNumberOfTransactions(jm,1,false,false));
// Stream from txid 90 with inProgressOk=false.
EditLogInputStream elis=getJournalInputStream(jm,90,false);
try {
FSEditLogOp lastReadOp=null;
while ((lastReadOp=elis.readOp()) != null) {
// Nothing from the in-progress segment (txids > 100) may appear.
assertTrue(lastReadOp.getTransactionId() <= 100);
}
}
finally {
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
// The gap covers the 4th segment: txids (3*TXNS_PER_ROLL + 1) .. 4*TXNS_PER_ROLL.
final long startGapTxId=3 * TXNS_PER_ROLL + 1;
final long endGapTxId=4 * TXNS_PER_ROLL;
// Locate and delete exactly the 4th finalized segment file.
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
return true;
}
return false;
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Counting from 1 stops right before the gap.
assertEquals(startGapTxId - 1,getNumberOfTransactions(jm,1,true,true));
// Counting from inside the gap finds nothing.
assertEquals(0,getNumberOfTransactions(jm,startGapTxId,true,true));
// Counting after the gap covers everything up to 11*TXNS_PER_ROLL
// (10 rolls plus the in-progress segment).
assertEquals(11 * TXNS_PER_ROLL - endGapTxId,getNumberOfTransactions(jm,endGapTxId + 1,true,true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * Test that an error while finalizing a log segment (provoked here by
 * making the storage directory read-only) is reported to NNStorage, which
 * removes the failed directory. The test expects IllegalStateException.
 */
@Test(expected=IllegalStateException.class) public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
File f=new File(TestEditLog.TEST_DIR + "/filejournaltestError");
// 10 rolls; abort the stream on roll 10 in directory 0 (AbortSpec).
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
String sdRootPath=sd.getRoot().getAbsolutePath();
// Remove write permission recursively so finalizeLogSegment must fail.
FileUtil.chmod(sdRootPath,"-w",true);
try {
jm.finalizeLogSegment(0,1);
}
finally {
// Restore permissions so later cleanup can delete the directory.
FileUtil.chmod(sdRootPath,"+w",true);
// The failed directory must have been reported to (and removed by) NNStorage.
assertTrue(storage.getRemovedStorageDirs().contains(sd));
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test the namenode's object-count limit (DFS_NAMENODE_MAX_OBJECTS_KEY):
 * creating files/directories beyond the limit must fail with an
 * IOException, and deleting objects must free capacity so new ones can be
 * created again.
 * NOTE(review): the previous javadoc ("file data becomes available before
 * file is closed") did not describe this test.
 */
@Test public void testFileLimit() throws IOException {
Configuration conf=new HdfsConfiguration();
int maxObjects=5;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY,maxObjects);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
// Running total of namesystem objects the test expects to exist.
int currentNodes=0;
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
FSNamesystem namesys=cluster.getNamesystem();
try {
Path path=new Path("/");
assertTrue("/ should be a directory",fs.getFileStatus(path).isDirectory());
currentNodes=1;
// Fill up to the limit; each file is counted as 2 objects here
// (presumably inode + block — see the += 2 bookkeeping; confirm).
for (int i=0; i < maxObjects / 2; i++) {
Path file=new Path("/filestatus" + i);
DFSTestUtil.createFile(fs,file,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file);
currentNodes+=2;
}
// One more file would exceed the limit and must be rejected.
boolean hitException=false;
try {
Path file=new Path("/filestatus");
DFSTestUtil.createFile(fs,file,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file);
}
catch ( IOException e) {
hitException=true;
}
assertTrue("Was able to exceed file limit",hitException);
// Deleting a file frees capacity...
Path file0=new Path("/filestatus0");
fs.delete(file0,true);
System.out.println("Deleted file " + file0);
currentNodes-=2;
waitForLimit(namesys,currentNodes);
// ...so the same file can be re-created.
DFSTestUtil.createFile(fs,file0,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file0 + " again.");
currentNodes+=2;
file0=new Path("/filestatus0");
fs.delete(file0,true);
System.out.println("Deleted file " + file0 + " again.");
currentNodes-=2;
waitForLimit(namesys,currentNodes);
// Directories count against the limit too (two new inodes here).
Path dir=new Path("/dir0/dir1");
fs.mkdirs(dir);
System.out.println("Created directories " + dir);
currentNodes+=2;
waitForLimit(namesys,currentNodes);
// At the limit again: another mkdir must fail.
hitException=false;
try {
fs.mkdirs(new Path("dir.fail"));
System.out.println("Created directory should not have succeeded.");
}
catch ( IOException e) {
hitException=true;
}
assertTrue("Was able to exceed dir limit",hitException);
}
finally {
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if fsck can return -1 in case of failure:
 * a block whose recorded length is corrupted to -1 must make fsck exit -1
 * and print FAILURE_STATUS.
 * @throws Exception
 */
@Test public void testFsckError() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
String fileName="/test.txt";
Path filePath=new Path(fileName);
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,(short)1,1L);
DFSTestUtil.waitReplication(fs,filePath,(short)1);
// Reach into the namesystem and corrupt the block metadata directly.
INodeFile node=(INodeFile)cluster.getNamesystem().dir.getNode(fileName,true);
final BlockInfo[] blocks=node.getBlocks();
assertEquals(blocks.length,1);
blocks[0].setNumBytes(-1L);
// fsck must exit -1 and report a failure for the corrupted file.
String outStr=runFsck(conf,-1,true,fileName);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
fs.delete(filePath,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test fsck with FileNotFound: when getBlockLocations throws
 * FileNotFoundException for a path, NamenodeFsck.check() must swallow it
 * and the overall result must remain HEALTHY.
 */
@Test public void testFsckFileNotFound() throws Exception {
final short NUM_REPLICAS=1;
Configuration conf=new Configuration();
NameNode namenode=mock(NameNode.class);
NetworkTopology nettop=mock(NetworkTopology.class);
Map pmap=new HashMap();
Writer result=new StringWriter();
PrintWriter out=new PrintWriter(result,true);
InetAddress remoteAddress=InetAddress.getLocalHost();
FSNamesystem fsName=mock(FSNamesystem.class);
BlockManager blockManager=mock(BlockManager.class);
DatanodeManager dnManager=mock(DatanodeManager.class);
when(namenode.getNamesystem()).thenReturn(fsName);
// Any block-location lookup reports the file as missing.
when(fsName.getBlockLocations(anyString(),anyLong(),anyLong(),anyBoolean(),anyBoolean(),anyBoolean())).thenThrow(new FileNotFoundException());
when(fsName.getBlockManager()).thenReturn(blockManager);
when(blockManager.getDatanodeManager()).thenReturn(dnManager);
NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_REPLICAS,(short)1,remoteAddress);
// Build a plausible HdfsFileStatus for the phantom file.
String pathString="/tmp/testFile";
long length=123L;
boolean isDir=false;
int blockReplication=1;
long blockSize=128 * 1024L;
long modTime=123123123L;
long accessTime=123123120L;
FsPermission perms=FsPermission.getDefault();
String owner="foo";
String group="bar";
byte[] symlink=null;
// Fixed: previously allocated a 128-byte array that was immediately
// overwritten by this assignment (dead store).
byte[] path=DFSUtil.string2Bytes(pathString);
long fileId=312321L;
int numChildren=1;
HdfsFileStatus file=new HdfsFileStatus(length,isDir,blockReplication,blockSize,modTime,accessTime,perms,owner,group,symlink,path,fileId,numChildren,null);
Result res=new Result(conf);
try {
fsck.check(pathString,file,res);
}
catch ( Exception e) {
fail("Unexpected exception " + e.getMessage());
}
// The missing file must not mark the filesystem unhealthy.
assertTrue(res.toString().contains("HEALTHY"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test fsck with permission set on inodes: a user without read permission
 * on the directory must get a failing fsck (exit -1); after opening the
 * permissions the same user must see a HEALTHY report (exit 0).
 */
@Test public void testFsckPermission() throws Exception {
final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(20).build();
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
final MiniDFSCluster c2=cluster;
final String dir="/dfsck";
final Path dirpath=new Path(dir);
final FileSystem fs=c2.getFileSystem();
util.createFiles(fs,dir);
util.waitReplication(fs,dir,(short)3);
// Owner-only permissions: the fake user below must be denied.
fs.setPermission(dirpath,new FsPermission((short)0700));
UserGroupInformation fakeUGI=UserGroupInformation.createUserForTesting("ProbablyNotARealUserName",new String[]{"ShangriLa"});
// Expected exit code -1: permission denied for this user.
fakeUGI.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
System.out.println(runFsck(conf,-1,true,dir));
return null;
}
}
);
// World-readable now: the same user must get a HEALTHY report.
fs.setPermission(dirpath,new FsPermission((short)0777));
fakeUGI.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final String outStr=runFsck(conf,0,true,dir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
return null;
}
}
);
util.cleanup(fs,dir);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * do fsck:
 * run fsck on a healthy cluster (expects HEALTHY, an audit-log entry, and
 * no access-time change), then restart the namenode with zero datanodes
 * and expect a CORRUPT report.
 */
@Test public void testFsck() throws Exception {
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
// 1ms access-time precision: any accidental atime update by fsck
// would be visible in the assertion below.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
final String fileName="/srcdat";
util.createFiles(fs,fileName);
util.waitReplication(fs,fileName,(short)3);
final Path file=new Path(fileName);
long aTime=fs.getFileStatus(file).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr=runFsck(conf,0,true,"/");
// The fsck run itself must be recorded in the audit log.
verifyAuditLogs();
// fsck must not have modified the file's access time.
assertEquals(aTime,fs.getFileStatus(file).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
// Restart with zero datanodes: every block is now missing, so fsck
// must exit 1 and report CORRUPT.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
outStr=runFsck(conf,1,true,"/");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
System.out.println(outStr);
// Bring datanodes back so cleanup can delete the test data.
cluster.startDataNodes(conf,4,true,null,null);
cluster.waitActive();
cluster.waitClusterUp();
fs=cluster.getFileSystem();
util.cleanup(fs,"/srcdat");
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that fsck reports CORRUPT after a replica's on-disk block file has
 * been overwritten with garbage and the corruption has been detected via a
 * client read.
 */
@Test public void testCorruptBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
FileSystem fs=null;
DFSClient dfsClient=null;
LocatedBlocks blocks=null;
int replicaCount=0;
Random random=new Random();
String outStr=null;
short factor=1;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/testCorruptBlock");
DFSTestUtil.createFile(fs,file1,1024,factor,0);
DFSTestUtil.waitReplication(fs,file1,factor);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
// Sanity check: everything healthy before the corruption.
outStr=runFsck(conf,0,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Overwrite part of the replica's on-disk block file with garbage
// at a random offset in the first half of the file.
File blockFile=MiniDFSCluster.getBlockFile(0,block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile=new RandomAccessFile(blockFile,"rw");
FileChannel channel=raFile.getChannel();
String badString="BADBAD";
int rand=random.nextInt((int)channel.size() / 2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
// Read the file so the client detects and reports the corruption.
try {
IOUtils.copyBytes(fs.open(file1),new IOUtils.NullOutputStream(),conf,true);
}
catch ( IOException ie) {
// expected: the read hits the corrupted replica
}
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
// Poll until the namenode has processed the corruption report.
while (replicaCount != factor) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
}
assertTrue(blocks.get(0).isCorrupt());
// fsck must now exit 1, report CORRUPT, and name the file.
outStr=runFsck(conf,1,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
assertTrue(outStr.contains("testCorruptBlock"));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test for including the snapshot files in fsck report: with
 * -includeSnapshots the report must list files under /.snapshot, and
 * without it no snapshot paths may appear.
 */
@Test public void testFsckForSnapshotFiles() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
// Renamed from 'runFsck': the local previously shadowed the runFsck() helper.
String outStr=runFsck(conf,0,true,"/","-includeSnapshots","-files");
assertTrue(outStr.contains("HEALTHY"));
final String fileName="/srcdat";
DistributedFileSystem hdfs=cluster.getFileSystem();
Path file1=new Path(fileName);
DFSTestUtil.createFile(hdfs,file1,1024,(short)1,1000L);
hdfs.allowSnapshot(new Path("/"));
hdfs.createSnapshot(new Path("/"),"mySnapShot");
// With -includeSnapshots the snapshotted copy of the file is reported.
outStr=runFsck(conf,0,true,"/","-includeSnapshots","-files");
assertTrue(outStr.contains("/.snapshot/mySnapShot/srcdat"));
// Without the flag, snapshot paths must not leak into the report.
outStr=runFsck(conf,0,true,"/","-files");
assertFalse(outStr.contains("mySnapShot"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Test fsck -move followed by -delete: after all replicas of one file's
 * block are removed, repeated -move runs must keep the corrupt file listed
 * in the namespace, and -move -delete must remove it, returning the
 * namespace to HEALTHY.
 */
@Test public void testFsckMoveAndDelete() throws Exception {
final int MAX_MOVE_TRIES=5;
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsckMoveAndDelete").setNumFiles(5).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
String[] fileNames=util.getFileNames(topDir);
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
// Corrupt the first file by deleting its block from every datanode.
String corruptFileName=fileNames[0];
ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(corruptFileName,0,Long.MAX_VALUE).get(0).getBlock();
for (int i=0; i < 4; i++) {
File blockFile=MiniDFSCluster.getBlockFile(i,block);
if (blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete());
}
}
// Poll until fsck notices the missing block and reports CORRUPT.
outStr=runFsck(conf,1,false,"/");
while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
outStr=runFsck(conf,1,false,"/");
}
// Repeated -move runs must not remove the file from the namespace.
for (int i=0; i < MAX_MOVE_TRIES; i++) {
outStr=runFsck(conf,1,true,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
String[] newFileNames=util.getFileNames(topDir);
boolean found=false;
for ( String f : newFileNames) {
if (f.equals(corruptFileName)) {
found=true;
break;
}
}
assertTrue(found);
}
// -move -delete removes the corrupt file; namespace is healthy again.
outStr=runFsck(conf,1,true,"/","-move","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Test fsck -move: corrupt several files with distinct per-block patterns,
 * wait until fsck reports exactly the expected number of corrupt blocks,
 * salvage with -move, verify the salvaged contents, then -delete and
 * confirm the namespace is HEALTHY again.
 */
@Test public void testFsckMove() throws Exception {
Configuration conf=new HdfsConfiguration();
final int DFS_BLOCK_SIZE=1024;
final int NUM_DATANODES=4;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
// File sizes around 5 blocks so per-block corruption patterns below are
// meaningful (presumably ctor args are name/files/levels/max/min size —
// confirm against DFSTestUtil).
DFSTestUtil util=new DFSTestUtil("TestFsck",5,3,(5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1),5 * DFS_BLOCK_SIZE);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
String fileNames[]=util.getFileNames(topDir);
// Each entry names the set of block indices to corrupt in that file.
CorruptedTestFile ctFiles[]=new CorruptedTestFile[]{new CorruptedTestFile(fileNames[0],Sets.newHashSet(0),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[1],Sets.newHashSet(2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[2],Sets.newHashSet(4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[3],Sets.newHashSet(0,1,2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[4],Sets.newHashSet(1,2,3,4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE)};
int totalMissingBlocks=0;
for ( CorruptedTestFile ctFile : ctFiles) {
totalMissingBlocks+=ctFile.getTotalMissingBlocks();
}
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.removeBlocks();
}
// Poll fsck until it reports exactly totalMissingBlocks corrupt blocks.
while (true) {
outStr=runFsck(conf,1,false,"/");
String numCorrupt=null;
for ( String line : outStr.split(LINE_SEPARATOR)) {
Matcher m=numCorruptBlocksPattern.matcher(line);
if (m.matches()) {
numCorrupt=m.group(1);
break;
}
}
if (numCorrupt == null) {
throw new IOException("failed to find number of corrupt " + "blocks in fsck output.");
}
if (numCorrupt.equals(Integer.toString(totalMissingBlocks))) {
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
break;
}
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
}
// Salvage what is readable, then verify the salvaged remains.
outStr=runFsck(conf,1,false,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.checkSalvagedRemains();
}
// Delete the corrupt files; the filesystem must be HEALTHY again.
outStr=runFsck(conf,1,true,"/","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test for checking fsck command on illegal arguments should print the proper
 * usage: fsck must exit -1 and must not report the filesystem as HEALTHY.
 */
@Test public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
String fileName="/test.txt";
Path filePath=new Path(fileName);
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,(short)1,1L);
DFSTestUtil.waitReplication(fs,filePath,(short)1);
// Unknown flag: fsck must exit -1 and print usage instead of a report.
String outStr=runFsck(conf,-1,true,fileName,"-thisIsNotAValidFlag");
System.out.println(outStr);
// Fixed idiom: assertFalse(..) instead of assertTrue(!..).
assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Two positional paths is also an illegal invocation.
outStr=runFsck(conf,-1,true,"/",fileName);
System.out.println(outStr);
assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
fs.delete(filePath,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test fsck with symlinks in the filesystem: fsck must stay HEALTHY,
 * count the symlink in its totals, and not update the symlink's access
 * time.
 */
@Test public void testFsckSymlink() throws Exception {
final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// 1ms access-time precision so an accidental atime update is visible.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
// Fixed: removed a duplicate DFS_BLOCKREPORT_INTERVAL_MSEC_KEY
// assignment (the same value was already set above).
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
final String fileName="/srcdat";
util.createFiles(fs,fileName);
final FileContext fc=FileContext.getFileContext(cluster.getConfiguration(0));
final Path file=new Path(fileName);
final Path symlink=new Path("/srcdat-symlink");
fc.createSymlink(file,symlink,false);
util.waitReplication(fs,fileName,(short)3);
long aTime=fc.getFileStatus(symlink).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr=runFsck(conf,0,true,"/");
verifyAuditLogs();
// fsck must not have touched the symlink's access time.
assertEquals(aTime,fc.getFileStatus(symlink).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertTrue(outStr.contains("Total symlinks:\t\t1"));
util.cleanup(fs,fileName);
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * check if option -list-corruptfiles of fsck command works properly:
 * 0 corrupt files initially, 3 after deleting every block/meta file on
 * disk, and 0 again for a freshly created healthy directory.
 */
@Test public void testFsckListCorruptFilesBlocks() throws Exception {
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData",(short)1);
util.waitReplication(fs,"/corruptData",(short)1);
String outStr=runFsck(conf,0,false,"/corruptData","-list-corruptfileblocks");
System.out.println("1. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
// Delete every block and metadata file from the datanode storage dirs.
final String bpid=cluster.getNamesystem().getBlockPoolId();
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.",blockFile.delete());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
// Poll until the namenode notices at least one corrupt file.
final NamenodeProtocols namenode=cluster.getNameNodeRpc();
CorruptFileBlocks corruptFileBlocks=namenode.listCorruptFileBlocks("/corruptData",null);
int numCorrupt=corruptFileBlocks.getFiles().length;
while (numCorrupt == 0) {
Thread.sleep(1000);
corruptFileBlocks=namenode.listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.getFiles().length;
}
// All 3 files must now be reported corrupt.
outStr=runFsck(conf,-1,true,"/corruptData","-list-corruptfileblocks");
System.out.println("2. bad fsck out: " + outStr);
assertTrue(outStr.contains("has 3 CORRUPT files"));
// A fresh, healthy directory must report 0 corrupt files.
util.createFiles(fs,"/goodData");
outStr=runFsck(conf,0,true,"/goodData","-list-corruptfileblocks");
System.out.println("3. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
util.cleanup(fs,"/corruptData");
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that files open for write are reported by fsck only when the
 * -openforwrite option is given, and appear as ordinary healthy files
 * once closed.
 */
@Test public void testFsckOpenFiles() throws Exception {
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(4).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir="/srcdat";
String randomString="HADOOP ";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Open a file and write to it without closing it.
Path openFile=new Path(topDir + "/openFile");
FSDataOutputStream out=fs.create(openFile);
int writeCount=0;
while (writeCount != 100) {
out.write(randomString.getBytes());
writeCount++;
}
// Default fsck must not mention the open file.
outStr=runFsck(conf,0,true,topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
// With -openforwrite the open file must be listed.
outStr=runFsck(conf,0,true,topDir,"-openforwrite");
System.out.println(outStr);
assertTrue(outStr.contains("OPENFORWRITE"));
assertTrue(outStr.contains("openFile"));
// After closing, it is an ordinary healthy file again.
out.close();
outStr=runFsck(conf,0,true,topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
util.cleanup(fs,topDir);
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that a dummy implementation of JournalManager can
 * be initialized on startup
 */
@Test public void testDummyJournalManager() throws Exception {
MiniDFSCluster cluster=null;
Configuration conf=new Configuration();
// Register DummyJournalManager for the "dummy" URI scheme and use
// that URI as the edits directory.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",DummyJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,DUMMY_URI);
// No checked local volumes required, since the journal is not a directory.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
// Startup must have driven the plugin through shouldPrompt/format
// and handed it the expected conf, URI, and namespace info.
assertTrue(DummyJournalManager.shouldPromptCalled);
assertTrue(DummyJournalManager.formatCalled);
assertNotNull(DummyJournalManager.conf);
assertEquals(new URI(DUMMY_URI),DummyJournalManager.uri);
assertNotNull(DummyJournalManager.nsInfo);
assertEquals(DummyJournalManager.nsInfo.getClusterID(),cluster.getNameNode().getNamesystem().getClusterId());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test ImageServlet.isValidRequestor: principals of configured namenodes
 * are accepted even with a denying admin ACL; other principals are valid
 * only if the ACL allows them.
 */
@Test public void testIsValidRequestor() throws IOException {
Configuration conf=new HdfsConfiguration();
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
// Two-namenode HA setup; _HOST in the principals resolves per host.
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"ns1");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),"host1:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,"ns1","nn1"),"hdfs/_HOST@TEST-REALM.COM");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),"host2:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,"ns1","nn2"),"hdfs/_HOST@TEST-REALM.COM");
NameNode.initializeGenericKeys(conf,"ns1","nn1");
// ACL initially denies everyone.
AccessControlList acls=Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// The other namenode's principal is valid even with a denying ACL.
assertTrue(ImageServlet.isValidRequestor(context,"hdfs/host2@TEST-REALM.COM",conf));
// Now allow only the short user name "atm" through the ACL.
Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher(){
@Override public boolean matches( Object argument){
return ((UserGroupInformation)argument).getShortUserName().equals("atm");
}
}
))).thenReturn(true);
assertTrue(ImageServlet.isValidRequestor(context,"hdfs/host2@TEST-REALM.COM",conf));
assertTrue(ImageServlet.isValidRequestor(context,"atm@TEST-REALM.COM",conf));
// Anyone else is rejected.
assertFalse(ImageServlet.isValidRequestor(context,"todd@TEST-REALM.COM",conf));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test public void testConcatInEditLog() throws Exception {
final Path TEST_DIR=new Path("/testConcatInEditLog");
final long FILE_LEN=blockSize;
// Create three single-block source files.
Path[] srcFiles=new Path[3];
for (int i=0; i < srcFiles.length; i++) {
Path path=new Path(TEST_DIR,"src-" + i);
DFSTestUtil.createFile(dfs,path,FILE_LEN,REPL_FACTOR,1);
srcFiles[i]=path;
}
Path targetFile=new Path(TEST_DIR,"target");
DFSTestUtil.createFile(dfs,targetFile,FILE_LEN,REPL_FACTOR,1);
dfs.concat(targetFile,srcFiles);
assertTrue(dfs.exists(targetFile));
FileStatus origStatus=dfs.getFileStatus(targetFile);
// Restart the namenode so the concat must be replayed from the edit log.
cluster.restartNameNode(true);
// After replay: target still exists, sources are gone, and the
// modification time survived the restart.
assertTrue(dfs.exists(targetFile));
assertFalse(dfs.exists(srcFiles[0]));
FileStatus statusAfterRestart=dfs.getFileStatus(targetFile);
assertEquals(origStatus.getModificationTime(),statusAfterRestart.getModificationTime());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verify that DistributedFileSystem#concat rejects illegal arguments:
 * a source located in a different directory than the target, a
 * non-existent source path, and an empty source array.
 */
@Test public void testIllegalArg() throws IOException {
long length=blockSize * 3;
// Target file lives under /parentTrg.
Path targetParent=new Path("/parentTrg");
assertTrue(dfs.mkdirs(targetParent));
Path target=new Path(targetParent,"trg");
DFSTestUtil.createFile(dfs,target,length,REPL_FACTOR,1);
// Case 1: source in a different directory than the target.
Path otherDir=new Path("/dir1");
assertTrue(dfs.mkdirs(otherDir));
Path source=new Path(otherDir,"src");
DFSTestUtil.createFile(dfs,source,length,REPL_FACTOR,1);
try {
dfs.concat(target,new Path[]{source});
fail("didn't fail for src and trg in different directories");
}
catch ( Exception e) {
// expected
}
// Case 2: a source path that does not exist.
try {
dfs.concat(target,new Path[]{new Path("test1/a")});
fail("didn't fail with invalid arguments");
}
catch ( Exception e) {
// expected
}
// Case 3: an empty list of sources.
try {
dfs.concat(target,new Path[]{});
fail("didn't fail with invalid arguments");
}
catch ( Exception e) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that a datanode added to the hosts-exclude file and decommissioned
 * shows up as "Decommissioned" in the NameNodeInfo MXBean's LiveNodes
 * attribute.
 */
@Test public void testHostsExcludeInUI() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
// Set up empty include/exclude files on the local filesystem.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Pick the first replica's datanode to decommission.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
String name=locs[0].getNames()[0];
String names=name + "\n" + "localhost:42\n";
// NOTE(review): the log message mentions 'names' (which includes
// "localhost:42") but only 'name' is written to the exclude file —
// confirm whether the extra entry was meant to be excluded too.
LOG.info("adding '" + names + "' to exclude file "+ excludeFile.toUri().getPath());
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// The MXBean's LiveNodes JSON must mark the node as Decommissioned.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
String nodes=(String)mbs.getAttribute(mxbeanName,"LiveNodes");
assertTrue("Live nodes should contain the decommissioned node",nodes.contains("Decommissioned"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that hosts listed in the hosts-include file which never register
 * are counted as dead: for a cluster started with no datanodes, both
 * FSNamesystem and the FSNamesystemState MXBean must report 2 dead and
 * 0 live datanodes.
 */
@Test public void testHostsIncludeForDeadCount() throws Exception {
Configuration conf=getConf();
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
// Two hosts in the include file, neither of which will ever register.
StringBuilder includeHosts=new StringBuilder();
includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777").append("\n");
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
// Fixed idiom: assertEquals instead of assertTrue(x == n).
assertEquals(2,ns.getNumDeadDataNodes());
assertEquals(0,ns.getNumLiveDataNodes());
// Cross-check the counts exposed through the FSNamesystemState MXBean.
// (Fixed: removed an unused local that stringified NumDeadDataNodes.)
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
assertEquals(2,((Integer)mbs.getAttribute(mxbeanName,"NumDeadDataNodes")).intValue());
assertEquals(0,((Integer)mbs.getAttribute(mxbeanName,"NumLiveDataNodes")).intValue());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    // Create /tmp containing three empty files.
    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
    // A listing from the beginning returns all three entries.
    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp", HdfsFileStatus.EMPTY_NAME, false);
    assertEquals(3, dl.getPartialListing().length);
    // Starting after "f2" returns only the entries sorted after it.
    // (Fixed: no need for the new String("f2") anti-pattern.)
    String f2 = "f2";
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);
    // The startAfter token may also be a /.reserved/.inodes path.
    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);
    // Once the file is deleted, its inode-path token must be rejected with
    // DirectoryListingStartAfterNotFoundException.
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testFileUnderConstruction() {
  // A fresh INodeFile is complete; converting it to under-construction
  // attaches a FileUnderConstructionFeature recording the lease holder.
  replication = 3;
  final INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
  assertFalse(inf.isUnderConstruction());

  final String holder = "client";
  final String holderMachine = "machine";
  inf.toUnderConstruction(holder, holderMachine);
  assertTrue(inf.isUnderConstruction());

  final FileUnderConstructionFeature feature = inf.getFileUnderConstructionFeature();
  assertEquals(holder, feature.getClientName());
  assertEquals(holderMachine, feature.getClientMachine());

  // Completing the file drops the under-construction state again.
  inf.toCompleteFile(Time.now());
  assertFalse(inf.isUnderConstruction());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests for addressing files using /.reserved/.inodes/ in file system
 * operations.
 */
@Test public void testInodeIdBasedPaths() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
NamenodeProtocols nnRpc=cluster.getNameNodeRpc();
// Create a base directory addressed through the root inode's id, then
// look up the id actually assigned to it.
Path baseDir=getInodePath(INodeId.ROOT_INODE_ID,"testInodeIdBasedPaths");
Path baseDirRegPath=new Path("/testInodeIdBasedPaths");
fs.mkdirs(baseDir);
fs.exists(baseDir);
long baseDirFileId=nnRpc.getFileInfo(baseDir.toString()).getFileId();
// Create a file entirely through its inode path and exercise the
// metadata setters/getters on that path.
Path testFileInodePath=getInodePath(baseDirFileId,"test1");
Path testFileRegularPath=new Path(baseDir,"test1");
final int testFileBlockSize=1024;
FileSystemTestHelper.createFile(fs,testFileInodePath,1,testFileBlockSize);
assertTrue(fs.exists(testFileInodePath));
// setPermission via the inode path must be visible via getFileStatus.
FsPermission perm=new FsPermission((short)0666);
fs.setPermission(testFileInodePath,perm);
FileStatus fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(perm,fileStatus.getPermission());
// setOwner / setTimes / setReplication through the inode path.
fs.setOwner(testFileInodePath,fileStatus.getOwner(),fileStatus.getGroup());
fs.setTimes(testFileInodePath,0,0);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(0,fileStatus.getModificationTime());
assertEquals(0,fileStatus.getAccessTime());
fs.setReplication(testFileInodePath,(short)3);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(3,fileStatus.getReplication());
fs.setReplication(testFileInodePath,(short)1);
assertEquals(testFileBlockSize,nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
// These calls only need to accept an inode path without throwing; no
// return values are checked.
{
fs.isFileClosed(testFileInodePath);
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
fs.access(testFileInodePath,FsAction.READ_WRITE);
}
// Symlink creation with invalid and valid targets under the inode path.
String invalidTarget=new Path(baseDir,"invalidTarget").toString();
String link=new Path(baseDir,"link").toString();
testInvalidSymlinkTarget(nnRpc,invalidTarget,link);
String validTarget="/validtarget";
testValidSymlinkTarget(nnRpc,validTarget,link);
// append/recoverLease accept inode paths; block locations fetched via the
// inode path must match those fetched via the regular path.
// NOTE(review): the stream returned by append() is never closed here —
// presumably recoverLease() releases the lease; confirm intent.
fs.append(testFileInodePath);
fs.recoverLease(testFileInodePath);
LocatedBlocks l1=nnRpc.getBlockLocations(testFileInodePath.toString(),0,Long.MAX_VALUE);
LocatedBlocks l2=nnRpc.getBlockLocations(testFileRegularPath.toString(),0,Long.MAX_VALUE);
checkEquals(l1,l2);
// Rename back and forth (with and without OVERWRITE); the file status
// must be unchanged after each round trip.
Path renameDst=getInodePath(baseDirFileId,"test2");
fileStatus=fs.getFileStatus(testFileInodePath);
fs.rename(testFileInodePath,renameDst);
fs.rename(renameDst,testFileInodePath);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
fs.rename(testFileInodePath,renameDst,Rename.OVERWRITE);
fs.rename(renameDst,testFileInodePath,Rename.OVERWRITE);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
// Content summary and recursive listings agree between regular and inode
// paths; finally delete through the inode path.
assertEquals(fs.getContentSummary(testFileRegularPath).toString(),fs.getContentSummary(testFileInodePath).toString());
checkEquals(fs.listFiles(baseDirRegPath,false),fs.listFiles(baseDir,false));
fs.delete(testFileInodePath,true);
assertFalse(fs.exists(testFileInodePath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test public void testInodeId() throws IOException {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNamesystem();
// Fresh namesystem: only the root inode exists and the id counter sits
// at the root inode id. inodeCount/expectedLastInodeId track the
// expected state through every operation below.
long lastId=fsn.getLastInodeId();
int inodeCount=1;
long expectedLastInodeId=INodeId.ROOT_INODE_ID;
assertEquals(fsn.dir.rootDir.getId(),INodeId.ROOT_INODE_ID);
assertEquals(expectedLastInodeId,lastId);
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// mkdir allocates one new inode id and adds one inode-map entry.
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/test1");
assertTrue(fs.mkdirs(path));
assertEquals(++expectedLastInodeId,fsn.getLastInodeId());
assertEquals(++inodeCount,fsn.dir.getInodeMapSize());
// Creating a file allocates one more id; getFileInfo exposes that id.
NamenodeProtocols nnrpc=cluster.getNameNodeRpc();
DFSTestUtil.createFile(fs,new Path("/test1/file"),1024,(short)1,0);
assertEquals(++expectedLastInodeId,fsn.getLastInodeId());
assertEquals(++inodeCount,fsn.dir.getInodeMapSize());
HdfsFileStatus fileStatus=nnrpc.getFileInfo("/test1/file");
assertEquals(expectedLastInodeId,fileStatus.getFileId());
// Rename reuses the existing inode: no new id, no new map entry.
Path renamedPath=new Path("/test2");
assertTrue(fs.rename(path,renamedPath));
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Deleting the directory removes it and its file from the inode map.
assertTrue(fs.delete(renamedPath,true));
inodeCount-=2;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Two files under a recreated /test1: dir + 2 files = 3 new inodes.
String file1="/test1/file1";
String file2="/test1/file2";
DFSTestUtil.createFile(fs,new Path(file1),512,(short)1,0);
DFSTestUtil.createFile(fs,new Path(file2),512,(short)1,0);
inodeCount+=3;
expectedLastInodeId+=3;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
// concat merges file1 into file2, releasing file1's inode; the id
// counter itself is unaffected.
nnrpc.concat(file2,new String[]{file1});
inodeCount--;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
// Removing /test1 drops the directory and the concatenated file.
assertTrue(fs.delete(new Path("/test1"),true));
inodeCount-=2;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Counter and map must survive a namenode restart (edit-log replay).
cluster.restartNameNode();
cluster.waitActive();
fsn=cluster.getNamesystem();
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// /test2/file2 implicitly creates /test2 as well: 2 new inodes.
DFSTestUtil.createFile(fs,new Path("/test2/file2"),1024,(short)1,0);
expectedLastInodeId+=2;
inodeCount+=2;
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// An open (in-progress) file also consumes ids: /test3 dir + the file.
FSDataOutputStream outStream=fs.create(new Path("/test3/file"));
assertTrue(outStream != null);
expectedLastInodeId+=2;
inodeCount+=2;
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Save the namespace while the file is still open, then restart: state
// restored from the fsimage must preserve counter and map size.
fsn.enterSafeMode(false);
fsn.saveNamespace();
fsn.leaveSafeMode();
outStream.close();
cluster.restartNameNode();
cluster.waitActive();
fsn=cluster.getNamesystem();
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=120000) public void testWriteToDeletedFile() throws IOException {
  // Writing to a stream whose file has been deleted must fail on hflush.
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));

    int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
    byte[] data = new byte[size];

    // Open an output stream, then delete the file out from under it.
    Path filePath = new Path("/test1/file");
    FSDataOutputStream fos = fs.create(filePath);
    fs.delete(filePath, false);
    try {
      fos.write(data, 0, data.length);
      fos.hflush();
      fail("Write should fail after delete");
    } catch (Exception ignored) {
      // Expected: the write pipeline cannot complete for a deleted file.
    }
  } finally {
    // Fixed: the original attached the finally only to the inner write-try,
    // so a failure in mkdirs/create/delete leaked the cluster.
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Check /.reserved path is reserved and cannot be created.
 */
@Test public void testReservedFileNames() throws IOException {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Client-side creation of "/.reserved" must be rejected.
// NOTE(review): the second call repeats the first verbatim — possibly one
// of the two was meant to pass a different argument; confirm intent.
ensureReservedFileNamesCannotBeCreated(fs,"/.reserved",false);
ensureReservedFileNamesCannotBeCreated(fs,"/.reserved",false);
Path reservedPath=new Path("/.reserved");
// With the global reserved-name check disabled, mkdir of "/.reserved"
// succeeds — but loading an fsimage/editlog containing it must fail.
// NOTE(review): CHECK_RESERVED_FILE_NAMES is a global static that is never
// restored to true within this method — verify it is reset elsewhere
// (e.g. in setup/teardown) so other tests are not affected.
FSDirectory.CHECK_RESERVED_FILE_NAMES=false;
fs.mkdirs(reservedPath);
assertTrue(fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
// Repeat the scenario with "/.reserved" as a file instead of a directory:
// restart must succeed first, then the reserved file is created and the
// image/editlog load must again be rejected.
FSDirectory.CHECK_RESERVED_FILE_NAMES=false;
ensureClusterRestartSucceeds(cluster);
fs.delete(reservedPath,true);
DFSTestUtil.createFile(fs,reservedPath,10,(short)1,0L);
assertTrue(!fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDotdotInodePath() throws Exception {
  // "/.reserved/.inodes/<id>/.." must resolve to the parent inode; for the
  // root inode, ".." resolves to the root itself.
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  DFSClient client = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);
    final long dirId = fsdir.getINode(dir.toString()).getId();
    final long rootId = fsdir.getINode("/").getId();

    client = new DFSClient(NameNode.getAddress(conf), conf);

    // Parent of /dir, addressed through its inode path, is the root.
    HdfsFileStatus status = client.getFileInfo("/.reserved/.inodes/" + dirId + "/..");
    assertEquals(rootId, status.getFileId());

    // Parent of the root, addressed the same way, is the root itself.
    status = client.getFileInfo("/.reserved/.inodes/" + rootId + "/..");
    assertEquals(rootId, status.getFileId());
  } finally {
    IOUtils.cleanup(LOG, client);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests for {@link FSDirectory#resolvePath(String,byte[][],FSDirectory)}
 */
@Test public void testInodePath() throws IOException {
  // Build the inode tree /a/b/c and mock an FSDirectory whose getInode()
  // always returns the deepest inode of that tree.
  String path = "/a/b/c";
  INode inode = createTreeOfInodes(path);
  FSDirectory fsd = Mockito.mock(FSDirectory.class);
  Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());

  // Non-reserved paths resolve to themselves.
  assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
  byte[][] components = INode.getPathComponents(path);
  String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // An inode path resolves to the inode's full path.
  components = INode.getPathComponents("/.reserved/.inodes/1");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // Same with a trailing slash.
  // BUG FIX: the original asserted without re-resolving here, so the
  // previous result was checked twice and this case was never exercised.
  components = INode.getPathComponents("/.reserved/.inodes/1/");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // Components after the inode id are appended to the resolved path.
  components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals("/a/b/c/d/e/f", resolvedPath);

  // "/.reserved/.inodes" by itself is not an inode path.
  String testPath = "/.reserved/.inodes";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // The root inode id resolves to "/".
  testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals("/", resolvedPath);

  // A path that merely looks similar is left untouched.
  testPath = "/.invalid/.inodes/1";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // An unknown inode id must raise FileNotFoundException.
  Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
  testPath = "/.reserved/.inodes/1234";
  components = INode.getPathComponents(testPath);
  try {
    String realPath = FSDirectory.resolvePath(testPath, components, fsd);
    fail("Path should not be resolved:" + realPath);
  } catch (IOException e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test for the static {@link INodeFile#valueOf(INode,String)}and {@link INodeFileUnderConstruction#valueOf(INode,String)} methods.
 * @throws IOException
 */
@Test public void testValueOf() throws IOException {
  final String path = "/testValueOf";
  final short replication = 3;

  // Case 1: a null inode — both valueOf variants report "does not exist".
  {
    final INode nullInode = null;
    try {
      INodeFile.valueOf(nullInode, path);
      fail();
    } catch (FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("File does not exist"));
    }
    try {
      INodeDirectory.valueOf(nullInode, path);
      fail();
    } catch (FileNotFoundException e) {
      assertTrue(e.getMessage().contains("Directory does not exist"));
    }
  }
  // Case 2: a regular file is returned unchanged by INodeFile.valueOf but
  // rejected by INodeDirectory.valueOf.
  {
    final INode regularFile = createINodeFile(replication, preferredBlockSize);
    assertSame(regularFile, INodeFile.valueOf(regularFile, path));
    try {
      INodeDirectory.valueOf(regularFile, path);
      fail();
    } catch (PathIsNotDirectoryException e) {
      // expected
    }
  }
  // Case 3: a file under construction behaves like a regular file here.
  {
    final INode ucFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
    ucFile.asFile().toUnderConstruction("client", "machine");
    assertSame(ucFile, INodeFile.valueOf(ucFile, path));
    try {
      INodeDirectory.valueOf(ucFile, path);
      fail();
    } catch (PathIsNotDirectoryException expected) {
      // expected
    }
  }
  // Case 4: a directory is rejected by INodeFile.valueOf and returned
  // unchanged by INodeDirectory.valueOf.
  {
    final INode directory = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L);
    try {
      INodeFile.valueOf(directory, path);
      fail();
    } catch (FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("Path is not a file"));
    }
    assertSame(directory, INodeDirectory.valueOf(directory, path));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
@Test public void testLocationLimitInListingOps() throws Exception {
  final Configuration conf = new Configuration();
  // Allow at most 9 block locations per listing response.
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    // Fixed: typed list instead of the raw ArrayList the original used.
    ArrayList<String> source = new ArrayList<String>();
    hdfs.mkdirs(new Path("/tmp1"));
    hdfs.mkdirs(new Path("/tmp2"));
    source.add("f1");
    source.add("f2");
    int numEntries = source.size();
    // Each file has 3 blocks replicated 3 ways = 9 locations, so the
    // location limit permits exactly one entry per getListing call.
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp1/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 3, 0);
    }
    byte[] start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1", start, true);
      assertEquals(1, dl.getPartialListing().length);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    // Every created entry must have appeared in exactly one page.
    assertTrue(source.isEmpty());
    source.add("f1");
    source.add("f2");
    source.add("f3");
    source.add("f4");
    source.add("f5");
    source.add("f6");
    numEntries = source.size();
    // With replication 1 each file carries 3 locations, so 3 entries fit
    // into each response (9 / 3).
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 1, 0);
    }
    start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries / 3; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2", start, true);
      assertEquals(3, dl.getPartialListing().length);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    assertTrue(source.isEmpty());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Test whether the inode in inodeMap has been replaced after regular inode
 * replacement
 */
@Test public void testInodeReplacement() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);
    // Initially, the tree inode and the inode-map inode are the same object.
    INodeDirectory dirInode = getDir(fsdir, dir);
    assertSame(dirInode, fsdir.getInode(dirInode.getId()));

    // Setting a quota replaces the directory inode; the inode map must be
    // updated to point at the replacement.
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    dirInode = getDir(fsdir, dir);
    assertTrue(dirInode.isWithQuota());
    assertSame(dirInode, fsdir.getInode(dirInode.getId()));

    // Clearing the quota replaces the inode again; the map must follow.
    hdfs.setQuota(dir, -1, -1);
    dirInode = getDir(fsdir, dir);
    assertSame(dirInode, fsdir.getInode(dirInode.getId()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test public void testGetFullPathNameAfterSetQuota() throws Exception {
  final long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create /dir/file; the file inode must report its full path.
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
    INode fileInode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fileInode.getFullPathName());

    // Setting a quota swaps in a quota-enabled directory inode; the child
    // must still resolve its full path through the replacement.
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory quotaDirInode = getDir(fsdir, dir);
    assertEquals(dir.toString(), quotaDirInode.getFullPathName());
    assertTrue(quotaDirInode.isWithQuota());

    // Renaming the directory must be reflected in the child's full path.
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    fileInode = fsdir.getINode(newFile.toString());
    assertEquals(newFile.toString(), fileInode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 */
@Test(timeout=300000) public void testListCorruptFileBlocksInSafeMode() throws Exception {
  MiniDFSCluster cluster = null;
  Random random = new Random();
  try {
    Configuration conf = new HdfsConfiguration();
    // Scan directories and send block reports quickly so the corruption is
    // noticed within the test timeout.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    // A threshold > 1 keeps the namenode in safemode after restart, while a
    // zero repl-queue threshold lets it populate replication queues anyway.
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f);
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs, "/srcdat10");
    // Fixed: typed collection instead of the raw Collection the original used.
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.isEmpty());
    // Corrupt the trailing two bytes of one block's metadata file on disk.
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId());
    assertTrue("data directory does not exist", data_dir.exists());
    List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
    assertTrue("Data directory does not contain any blocks or there was an " + "IO error", metaFiles != null && !metaFiles.isEmpty());
    File metaFile = metaFiles.get(0);
    long position;
    int length = 2;
    // Fixed: try-with-resources — the original leaked the RandomAccessFile
    // if channel.write threw.
    try (RandomAccessFile file = new RandomAccessFile(metaFile, "rw")) {
      FileChannel channel = file.getChannel();
      position = channel.size() - 2;
      byte[] buffer = new byte[length];
      random.nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer), position);
    }
    LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length " + length);
    // Reading must surface the corruption as a BlockMissingException; any
    // other IOException is a test failure.
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      // Fixed: fail() replaces the original assertTrue(msg, false).
      fail("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException " + e);
    }
    badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertEquals("Namenode has " + badFiles.size() + " bad files. Expecting 1.", 1, badFiles.size());
    // Restart: the namenode stays in safemode but must still populate its
    // replication queues and report the corrupt file.
    cluster.restartNameNode(0);
    fs = cluster.getFileSystem();
    while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
      try {
        LOG.info("waiting for replication queues");
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
        // keep polling; overall progress is bounded by the test timeout
      }
    }
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      fail("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException " + e);
    }
    badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertEquals("Namenode has " + badFiles.size() + " bad files. Expecting 1.", 1, badFiles.size());
    assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode());
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
    util.cleanup(fs, "/srcdat10");
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw e;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test(timeout=300000) public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks = FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
    // Create three times as many files as one listCorruptFileBlocks call
    // may return, forcing the client iterator into successive RPCs.
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);
    final NameNode namenode = cluster.getNameNode();
    // Fixed: typed collection instead of the raw Collection the original used.
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.", badFiles.isEmpty());
    // Delete every block file and metadata file so all blocks go missing.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        if (metadataFiles == null) continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();
    // Poll until the namenode reports the per-call maximum; the loop is
    // bounded by the test timeout.
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertEquals("Namenode has " + badFiles.size() + " bad files. Expecting " + maxCorruptFileBlocks + ".", maxCorruptFileBlocks, badFiles.size());
    // The client-side iterator must page through all corrupt blocks,
    // taking more than one RPC to do so.
    CorruptFileBlockIterator iter = (CorruptFileBlockIterator) fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got " + corruptPaths, corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made " + iter.getCallsMade(), iter.getCallsMade() > 1);
    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");
    final NameNode namenode = cluster.getNameNode();
    // Fixed: typed collections instead of the raw types the original used.
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertEquals(0, numCorrupt);
    // Remove every block and metadata file so all three files go corrupt.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        if (metadataFiles == null) continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }
    // Poll (up to ~30s) until all three files are reported corrupt.
    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30) break;
    }
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertEquals(3, numCorrupt);
    // A cookie of "1" resumes the listing after the first entry; the first
    // entry of the resumed listing must be the second overall.
    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertEquals(2, numCorrupt);
    assertTrue(ncfb[0].block.getBlockName().equalsIgnoreCase(cfb[1].block.getBlockName()));
    // The advanced cookie now points past the last entry.
    corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertEquals(0, numCorrupt);
    // Freshly created healthy files report no corruption.
    util.createFiles(fs, "/goodData");
    corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertEquals(0, numCorrupt);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * check if nn.getCorruptFiles() returns a file that has corrupted blocks
 */
@Test(timeout=300000) public void testListCorruptFilesCorruptedBlock() throws Exception {
  MiniDFSCluster cluster = null;
  Random random = new Random();
  try {
    Configuration conf = new HdfsConfiguration();
    // Scan directories and send block reports quickly so the corruption is
    // noticed within the test timeout.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testCorruptFilesCorruptedBlock").setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs, "/srcdat10");
    final NameNode namenode = cluster.getNameNode();
    // Fixed: typed collection instead of the raw Collection the original used.
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.isEmpty());
    // Corrupt the trailing two bytes of one block's metadata file on disk.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 1);
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("data directory does not exist", data_dir.exists());
    List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
    assertTrue("Data directory does not contain any blocks or there was an " + "IO error", metaFiles != null && !metaFiles.isEmpty());
    File metaFile = metaFiles.get(0);
    long position;
    int length = 2;
    // Fixed: try-with-resources — the original leaked the RandomAccessFile
    // if channel.write threw.
    try (RandomAccessFile file = new RandomAccessFile(metaFile, "rw")) {
      FileChannel channel = file.getChannel();
      position = channel.size() - 2;
      byte[] buffer = new byte[length];
      random.nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer), position);
    }
    LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length " + length);
    // Reading must surface the corruption as a BlockMissingException; any
    // other IOException is a test failure.
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      // Fixed: fail() replaces the original assertTrue(msg, false).
      fail("Corrupted replicas not handled properly. Expecting BlockMissingException " + " but received IOException " + e);
    }
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertEquals("Namenode has " + badFiles.size() + " bad files. Expecting 1.", 1, badFiles.size());
    util.cleanup(fs, "/srcdat10");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test(timeout=300000) public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");
    // Fixed: typed iterator instead of the raw RemoteIterator the original used.
    RemoteIterator<Path> corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertEquals(0, numCorrupt);
    // Delete block and metadata files on both volumes of the datanode so
    // every block goes missing.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
      if (metadataFiles == null) continue;
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
      }
    }
    // Poll (up to ~30s) until all three files are reported corrupt.
    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30) break;
    }
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertEquals(3, numCorrupt);
    util.cleanup(fs, "/corruptData");
    // NOTE(review): "/goodData" is never created in this test; this cleanup
    // is presumably a harmless no-op — confirm against DFSTestUtil.cleanup.
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * Tests metasave after delete, to make sure there are no orphaned blocks.
 * Stops one of the two datanodes, raises replication on one file, deletes
 * both files, then verifies the metasave report shows no blocks waiting
 * for or undergoing replication.
 */
@Test public void testMetasaveAfterDelete() throws IOException, InterruptedException {
  for (int i=0; i < 2; i++) {
    Path file=new Path("/filestatus" + i);
    DFSTestUtil.createFile(fileSys,file,1024,1024,blockSize,(short)2,seed);
  }
  cluster.stopDataNode(1);
  // Give the heartbeat monitor time to declare the stopped datanode dead.
  Thread.sleep(15000);
  namesystem.setReplication("/filestatus0",(short)4);
  namesystem.delete("/filestatus0",true);
  namesystem.delete("/filestatus1",true);
  namesystem.metaSave("metasaveAfterDelete.out.txt");
  BufferedReader reader=null;
  try {
    FileInputStream fstream=new FileInputStream(getLogFile("metasaveAfterDelete.out.txt"));
    DataInputStream in=new DataInputStream(fstream);
    reader=new BufferedReader(new InputStreamReader(in));
    reader.readLine();
    String line=reader.readLine();
    // Assert.assertEquals reports expected-vs-actual on mismatch and
    // tolerates a null line at EOF, unlike assertTrue(line.equals(...)),
    // which threw NPE if the report was truncated.
    Assert.assertEquals("Live Datanodes: 1",line);
    line=reader.readLine();
    Assert.assertEquals("Dead Datanodes: 1",line);
    line=reader.readLine();
    Assert.assertEquals("Metasave: Blocks waiting for replication: 0",line);
    line=reader.readLine();
    Assert.assertEquals("Mis-replicated blocks that have been postponed:",line);
    line=reader.readLine();
    Assert.assertEquals("Metasave: Blocks being replicated: 0",line);
  }
  finally {
    if (reader != null) reader.close();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests metasave: stops a datanode, raises replication on one file, then
 * checks the header lines of the generated metasave report.
 */
@Test public void testMetaSave() throws IOException, InterruptedException {
  for (int i=0; i < 2; i++) {
    Path file=new Path("/filestatus" + i);
    DFSTestUtil.createFile(fileSys,file,1024,1024,blockSize,(short)2,seed);
  }
  cluster.stopDataNode(1);
  // Give the heartbeat monitor time to declare the stopped datanode dead.
  Thread.sleep(15000);
  namesystem.setReplication("/filestatus0",(short)4);
  namesystem.metaSave("metasave.out.txt");
  BufferedReader reader=null;
  try {
    // Streams are opened inside the try (and closed through the reader in
    // the finally); the original opened them before the try, leaking the
    // FileInputStream if a wrapper constructor threw.
    FileInputStream fstream=new FileInputStream(getLogFile("metasave.out.txt"));
    DataInputStream in=new DataInputStream(fstream);
    reader=new BufferedReader(new InputStreamReader(in));
    String line=reader.readLine();
    Assert.assertEquals("3 files and directories, 2 blocks = 5 total filesystem objects",line);
    line=reader.readLine();
    // Assert.assertEquals reports expected-vs-actual and tolerates a null
    // line at EOF, unlike assertTrue(line.equals(...)).
    Assert.assertEquals("Live Datanodes: 1",line);
    line=reader.readLine();
    Assert.assertEquals("Dead Datanodes: 1",line);
    line=reader.readLine();
    line=reader.readLine();
    // Fifth line names one of the under-replicated files.
    assertTrue(line.matches("^/filestatus[01]:.*"));
  }
  finally {
    if (reader != null) reader.close();
  }
}
BooleanVerifier
/**
 * Runs the namenode with -metadataVersion and verifies both the image and
 * software layout versions are printed to stdout.
 */
@Test(timeout=30000) public void testMetadataVersionOutput() throws IOException {
  final PrintStream origOut=System.out;
  final ByteArrayOutputStream baos=new ByteArrayOutputStream();
  final PrintStream stdOut=new PrintStream(baos);
  System.setOut(stdOut);
  try {
    try {
      // -metadataVersion is expected to terminate via ExitException after
      // printing the version info.
      NameNode.createNameNode(new String[]{"-metadataVersion"},conf);
    }
    catch ( Exception e) {
      assertExceptionContains("ExitException",e);
    }
    final String verNumStr=HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
    assertTrue(baos.toString("UTF-8").contains("HDFS Image Version: " + verNumStr));
    assertTrue(baos.toString("UTF-8").contains("Software format version: " + verNumStr));
  }
  finally {
    // Restore stdout even when an assertion above fails; the original only
    // restored it on success, leaving System.out redirected for every
    // later test in the JVM.
    System.setOut(origOut);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises NameCache promotion: names put at least twice before
 * initialized() are cached and reused (the identical String instance is
 * returned); names put only once are not. After reset() nothing is
 * reused, even previously promoted names.
 */
@Test public void testDictionary() throws Exception {
  NameCache cache=new NameCache(2);
  String[] repeated={"part1","part10000000","fileabc","abc","filepart"};
  String[] single={"spart1","apart","abcd","def"};
  // Each repeated name goes in twice; the second put must hand back the
  // very same String instance once the name is promoted.
  for ( String name : repeated) {
    cache.put(name);
    assertTrue(name == cache.put(name));
  }
  // Names inserted only once must never be promoted.
  for ( String name : single) {
    cache.put(name);
  }
  cache.initialized();
  for ( String name : repeated) {
    verifyNameReuse(cache,name,true);
  }
  // Exactly the repeated names ended up in the cache.
  assertEquals(repeated.length,cache.size());
  for ( String name : single) {
    verifyNameReuse(cache,name,false);
  }
  // A reset discards everything: no reuse for any name afterwards.
  cache.reset();
  cache.initialized();
  for ( String name : repeated) {
    verifyNameReuse(cache,name,false);
  }
  for ( String name : single) {
    verifyNameReuse(cache,name,false);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * This test tries to simulate failure scenarios.
 * 1. Start cluster with shared name and edits dir
 * 2. Restart cluster by adding separate name and edits dirs
 * 3. Restart cluster by removing shared name and edits dir
 * 4. Restart cluster with old shared name and edits dir, but only latest
 *    name dir. This should fail since we don't have latest edits dir
 * 5. Restart cluster with old shared name and edits dir, but only latest
 *    edits dir. This should succeed since the latest edits will have
 *    segments leading all the way from the image in name_and_edits.
 */
@Test public void testNameEditsConfigsFailure() throws IOException {
  Path file1=new Path("TestNameEditsConfigs1");
  Path file2=new Path("TestNameEditsConfigs2");
  Path file3=new Path("TestNameEditsConfigs3");
  MiniDFSCluster cluster=null;
  Configuration conf=null;
  FileSystem fileSys=null;
  File nameOnlyDir=new File(base_dir,"name");
  File editsOnlyDir=new File(base_dir,"edits");
  File nameAndEditsDir=new File(base_dir,"name_and_edits");
  // Stage 1: shared name+edits dir; create file1.
  conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
    cluster.waitActive();
    assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
    fileSys=cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    DFSTestUtil.createFile(fileSys,file1,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
    checkFile(fileSys,file1,replication);
  }
  finally {
    // Null-guarded cleanup: if Builder.build() or waitActive() threw,
    // fileSys/cluster are null and the original unconditional close()
    // masked the real failure with a NullPointerException. Nulling them
    // out also prevents double-cleanup in a later stage's finally.
    if (fileSys != null) {
      fileSys.close();
      fileSys=null;
    }
    if (cluster != null) {
      cluster.shutdown();
      cluster=null;
    }
  }
  // Stage 2: add separate name and edits dirs; file1 must survive the
  // restart, then is replaced by file2.
  conf=new HdfsConfiguration();
  assertTrue(nameOnlyDir.mkdir());
  assertTrue(editsOnlyDir.mkdir());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath() + "," + nameOnlyDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath() + "," + editsOnlyDir.getPath());
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
    assertTrue(new File(nameOnlyDir,"current/VERSION").exists());
    assertTrue(new File(editsOnlyDir,"current/VERSION").exists());
    fileSys=cluster.getFileSystem();
    assertTrue(fileSys.exists(file1));
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    DFSTestUtil.createFile(fileSys,file2,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
    checkFile(fileSys,file2,replication);
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
      fileSys=null;
    }
    if (cluster != null) {
      cluster.shutdown();
      cluster=null;
    }
  }
  // Stage 3: drop the shared dir entirely; the separate dirs carry the
  // namespace on their own.
  try {
    conf=new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath());
    replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    assertTrue(fileSys.exists(file2));
    checkFile(fileSys,file2,replication);
    cleanupFile(fileSys,file2);
    DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
    checkFile(fileSys,file3,replication);
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
      fileSys=null;
    }
    if (cluster != null) {
      cluster.shutdown();
      cluster=null;
    }
  }
  // Stage 4: old shared dir plus only the latest *name* dir must fail to
  // start, because the latest edits are missing.
  conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    fail("Successfully started cluster but should not have been able to.");
  }
  catch ( IOException e) {
    LOG.info("EXPECTED: cluster start failed due to missing " + "latest edits dir",e);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    cluster=null;
  }
  // Stage 5: old shared dir plus the latest *edits* dir must succeed; the
  // edits lead all the way from the image in name_and_edits.
  conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    fileSys=cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    assertFalse(fileSys.exists(file2));
    assertTrue(fileSys.exists(file3));
    checkFile(fileSys,file3,replication);
    cleanupFile(fileSys,file3);
    DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
    checkFile(fileSys,file3,replication);
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * The test creates files and restarts cluster with different configs.
 * 1. Starts cluster with shared name and edits dirs
 * 2. Restarts cluster by adding additional (different) name and edits dirs
 * 3. Restarts cluster by removing shared name and edits dirs by allowing to
 * start using separate name and edits dirs
 * 4. Restart cluster by adding shared directory again, but make sure we
 * do not read any stale image or edits.
 * All along the test, we create and delete files at each restart to make
 * sure we are reading proper edits and image.
 * @throws Exception
 */
@Test public void testNameEditsConfigs() throws Exception {
Path file1=new Path("TestNameEditsConfigs1");
Path file2=new Path("TestNameEditsConfigs2");
Path file3=new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=null;
FileSystem fileSys=null;
final File newNameDir=new File(base_dir,"name");
final File newEditsDir=new File(base_dir,"edits");
final File nameAndEdits=new File(base_dir,"name_and_edits");
final File checkpointNameDir=new File(base_dir,"secondname");
final File checkpointEditsDir=new File(base_dir,"secondedits");
final File checkpointNameAndEdits=new File(base_dir,"second_name_and_edits");
// NOTE(review): generics appear stripped here (raw ImmutableList);
// presumably ImmutableList<File> — confirm against the original source.
ImmutableList allCurrentDirs=ImmutableList.of(new File(nameAndEdits,"current"),new File(newNameDir,"current"),new File(newEditsDir,"current"),new File(checkpointNameAndEdits,"current"),new File(checkpointNameDir,"current"),new File(checkpointEditsDir,"current"));
ImmutableList imageCurrentDirs=ImmutableList.of(new File(nameAndEdits,"current"),new File(newNameDir,"current"),new File(checkpointNameAndEdits,"current"),new File(checkpointNameDir,"current"));
// Stage 1: shared name+edits dir for both NN and 2NN; create file1 and
// run a checkpoint.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
DFSTestUtil.createFile(fileSys,file1,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file1,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Stage 2: add an extra name dir and an extra edits dir (restart without
// format); file1 must survive, then is replaced by file2.
conf=new HdfsConfiguration();
assertTrue(newNameDir.mkdir());
assertTrue(newEditsDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath() + "," + newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits.getPath() + "," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(fileSys.exists(file1));
checkFile(fileSys,file1,replication);
cleanupFile(fileSys,file1);
DFSTestUtil.createFile(fileSys,file2,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file2,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// After the checkpoint, all storage dirs must hold identical current/
// contents (modulo VERSION) and the same newest image.
FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs,ImmutableSet.of("VERSION"));
FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
// Stage 3: drop the shared dirs; run purely on the separate name/edits
// (and checkpoint) dirs.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys,file2,replication);
cleanupFile(fileSys,file2);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Name-only dirs hold an image but no edits; edits-only dirs the reverse.
checkImageAndEditsFilesExistence(newNameDir,true,false);
checkImageAndEditsFilesExistence(newEditsDir,false,true);
checkImageAndEditsFilesExistence(checkpointNameDir,true,false);
checkImageAndEditsFilesExistence(checkpointEditsDir,false,true);
// Stage 4: wipe the shared dirs' current/ so they contain only stale
// state, then re-add them; the NN must not read stale image or edits.
assertTrue(FileUtil.fullyDelete(new File(nameAndEdits,"current")));
assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits,"current")));
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath() + "," + newNameDir.getPath());
// NOTE(review): uses the File's toString() here rather than getPath() as
// everywhere else; same resulting value, but inconsistent — confirm.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits + "," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
assertTrue(!fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys,file3,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// The re-added shared dirs were repopulated with both image and edits.
checkImageAndEditsFilesExistence(nameAndEdits,true,true);
checkImageAndEditsFilesExistence(checkpointNameAndEdits,true,true);
}
BooleanVerifier
/**
 * Test dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
 * should tolerate white space between values.
 */
@Test public void testCheckPointDirsAreTrimmed() throws Exception {
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary=null;
  File checkpointNameDir1=new File(base_dir,"chkptName1");
  File checkpointEditsDir1=new File(base_dir,"chkptEdits1");
  File checkpointNameDir2=new File(base_dir,"chkptName2");
  File checkpointEditsDir2=new File(base_dir,"chkptEdits2");
  File nameDir=new File(base_dir,"name1");
  String whiteSpace=" \n \n ";
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getPath());
  // Surround every checkpoint dir value with whitespace/newlines; the
  // config layer must trim them before use.
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,whiteSpace + checkpointNameDir1.getPath() + whiteSpace,whiteSpace + checkpointNameDir2.getPath() + whiteSpace);
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,whiteSpace + checkpointEditsDir1.getPath() + whiteSpace,whiteSpace + checkpointEditsDir2.getPath() + whiteSpace);
  cluster=new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false).numDataNodes(3).build();
  try {
    cluster.waitActive();
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // If trimming worked, the checkpoint wrote into the clean paths.
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",checkpointNameDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",checkpointNameDir2.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + " must be trimmed ",checkpointEditsDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + " must be trimmed ",checkpointEditsDir2.exists());
  }
  finally {
    // Null-guard: if waitActive() or startSecondaryNameNode() threw,
    // 'secondary' is null and the original unconditional shutdown() masked
    // the real failure with a NullPointerException.
    if (secondary != null) {
      secondary.shutdown();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Starts a NameNodeHttpServer under the configured HTTP policy and checks
 * that exactly the endpoints the policy enables exist and are reachable,
 * and that disabled endpoints have no bound address.
 */
@Test public void testHttpPolicy() throws Exception {
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,policy.name());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,"localhost:0");
  final InetSocketAddress bindAddr=InetSocketAddress.createUnresolved("localhost",0);
  NameNodeHttpServer httpServer=null;
  try {
    httpServer=new NameNodeHttpServer(conf,null,bindAddr);
    httpServer.start();
    final boolean httpOn=policy.isHttpEnabled();
    final boolean httpsOn=policy.isHttpsEnabled();
    // HTTP endpoint answers iff the policy enables HTTP; otherwise it is
    // not even bound.
    Assert.assertTrue(implies(httpOn,canAccess("http",httpServer.getHttpAddress())));
    Assert.assertTrue(implies(!httpOn,httpServer.getHttpAddress() == null));
    // Same contract for HTTPS.
    Assert.assertTrue(implies(httpsOn,canAccess("https",httpServer.getHttpsAddress())));
    Assert.assertTrue(implies(!httpsOn,httpServer.getHttpsAddress() == null));
  }
  finally {
    if (httpServer != null) {
      httpServer.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that every attribute exposed through the NameNodeInfo MXBean
 * matches the value reported directly by FSNamesystem, then fails one
 * name dir (chmod 000) and checks the NameDirStatuses attribute reflects
 * the failure. Cache attributes are checked last.
 */
@SuppressWarnings({"unchecked"}) @Test public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
// Fast heartbeats so the stopped datanode is noticed quickly.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNameNode().namesystem;
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
// Build an include file listing all datanodes, then stop one so the test
// has both live and dead nodes to inspect.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts=new StringBuilder();
for ( DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
}
DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
cluster.stopDataNode(0);
// Wait until the namenode counts only 2 datanodes in service.
while (fsn.getNumDatanodesInService() != 2) {
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// Scalar attributes: each MXBean value must equal the FSNamesystem value.
String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId");
assertEquals(fsn.getClusterId(),clusterId);
String blockpoolId=(String)mbs.getAttribute(mxbeanName,"BlockPoolId");
assertEquals(fsn.getBlockPoolId(),blockpoolId);
String version=(String)mbs.getAttribute(mxbeanName,"Version");
assertEquals(fsn.getVersion(),version);
assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
Long used=(Long)mbs.getAttribute(mxbeanName,"Used");
assertEquals(fsn.getUsed(),used.longValue());
Long total=(Long)mbs.getAttribute(mxbeanName,"Total");
assertEquals(fsn.getTotal(),total.longValue());
String safemode=(String)mbs.getAttribute(mxbeanName,"Safemode");
assertEquals(fsn.getSafemode(),safemode);
Long nondfs=(Long)(mbs.getAttribute(mxbeanName,"NonDfsUsedSpace"));
assertEquals(fsn.getNonDfsUsedSpace(),nondfs.longValue());
Float percentremaining=(Float)(mbs.getAttribute(mxbeanName,"PercentRemaining"));
assertEquals(fsn.getPercentRemaining(),percentremaining.floatValue(),DELTA);
Long totalblocks=(Long)(mbs.getAttribute(mxbeanName,"TotalBlocks"));
assertEquals(fsn.getTotalBlocks(),totalblocks.longValue());
// LiveNodes is a JSON map of per-node maps; check the expected keys.
// NOTE(review): generics appear stripped by extraction here — the raw
// "Map>" casts presumably were Map<String, Map<String, Object>>; restore
// before compiling.
String alivenodeinfo=(String)(mbs.getAttribute(mxbeanName,"LiveNodes"));
Map> liveNodes=(Map>)JSON.parse(alivenodeinfo);
assertTrue(liveNodes.size() > 0);
for ( Map liveNode : liveNodes.values()) {
assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
assertTrue(liveNode.containsKey("capacity"));
assertTrue(((Long)liveNode.get("capacity")) > 0);
assertTrue(liveNode.containsKey("numBlocks"));
assertTrue(((Long)liveNode.get("numBlocks")) == 0);
}
assertEquals(fsn.getLiveNodes(),alivenodeinfo);
// DeadNodes: the stopped datanode should appear with these keys.
String deadnodeinfo=(String)(mbs.getAttribute(mxbeanName,"DeadNodes"));
assertEquals(fsn.getDeadNodes(),deadnodeinfo);
Map> deadNodes=(Map>)JSON.parse(deadnodeinfo);
assertTrue(deadNodes.size() > 0);
for ( Map deadNode : deadNodes.values()) {
assertTrue(deadNode.containsKey("lastContact"));
assertTrue(deadNode.containsKey("decommissioned"));
assertTrue(deadNode.containsKey("xferaddr"));
}
String nodeUsage=(String)(mbs.getAttribute(mxbeanName,"NodeUsage"));
assertEquals("Bad value for NodeUsage",fsn.getNodeUsage(),nodeUsage);
String nameJournalStatus=(String)(mbs.getAttribute(mxbeanName,"NameJournalStatus"));
assertEquals("Bad value for NameJournalStatus",fsn.getNameJournalStatus(),nameJournalStatus);
String journalTxnInfo=(String)mbs.getAttribute(mxbeanName,"JournalTransactionInfo");
assertEquals("Bad value for NameTxnIds",fsn.getJournalTransactionInfo(),journalTxnInfo);
String nnStarted=(String)mbs.getAttribute(mxbeanName,"NNStarted");
assertEquals("Bad value for NNStarted",fsn.getNNStarted(),nnStarted);
String compileInfo=(String)mbs.getAttribute(mxbeanName,"CompileInfo");
assertEquals("Bad value for CompileInfo",fsn.getCompileInfo(),compileInfo);
String corruptFiles=(String)(mbs.getAttribute(mxbeanName,"CorruptFiles"));
assertEquals("Bad value for CorruptFiles",fsn.getCorruptFiles(),corruptFiles);
// Initially both name dirs are active and none failed.
String nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
assertEquals(fsn.getNameDirStatuses(),nameDirStatuses);
Map> statusMap=(Map>)JSON.parse(nameDirStatuses);
Collection nameDirUris=cluster.getNameDirs(0);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
}
assertEquals(2,statusMap.get("active").size());
assertEquals(0,statusMap.get("failed").size());
// Make one name dir unwritable (chmod 000) and roll the edit log so the
// namenode notices; it should then be reported under "failed".
// Permissions are restored in the finally block below.
File failedNameDir=new File(nameDirUris.iterator().next());
assertEquals(0,FileUtil.chmod(new File(failedNameDir,"current").getAbsolutePath(),"000"));
cluster.getNameNodeRpc().rollEditLog();
nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
statusMap=(Map>)JSON.parse(nameDirStatuses);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
String expectedStatus=nameDir.equals(failedNameDir) ? "failed" : "active";
System.out.println("Checking for the presence of " + nameDir + " in "+ expectedStatus+ " name dirs.");
assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
}
assertEquals(1,statusMap.get("active").size());
assertEquals(1,statusMap.get("failed").size());
// Nothing is cached, but capacity is memlock-limit per datanode.
assertEquals(0L,mbs.getAttribute(mxbeanName,"CacheUsed"));
assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(),mbs.getAttribute(mxbeanName,"CacheCapacity"));
}
finally {
if (cluster != null) {
// Restore permissions on the deliberately-failed name dir so shutdown
// and later test runs can write to it.
for ( URI dir : cluster.getNameDirs(0)) {
FileUtil.chmod(new File(new File(dir),"current").getAbsolutePath(),"755");
}
cluster.shutdown();
}
}
}
BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies parsing of the -upgrade startup option and its -clusterid and
 * -renameReserved sub-options, including the populated reserved-path
 * rename map and rejection of invalid rename specifications.
 */
@Test(timeout=10000) public void testUpgrade(){
  StartupOption opt=null;
  // Plain -upgrade: no cluster id, no reserved-path renames.
  opt=NameNode.parseArguments(new String[]{"-upgrade"});
  assertEquals(opt,StartupOption.UPGRADE);
  assertNull(opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // -clusterid is captured on the option.
  opt=NameNode.parseArguments(new String[]{"-upgrade","-clusterid","mycid"});
  assertEquals(StartupOption.UPGRADE,opt);
  assertEquals("mycid",opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // Explicit rename targets populate the rename map.
  opt=NameNode.parseArguments(new String[]{"-upgrade","-clusterid","mycid","-renameReserved",".snapshot=.my-snapshot,.reserved=.my-reserved"});
  assertEquals(StartupOption.UPGRADE,opt);
  assertEquals("mycid",opt.getClusterId());
  assertEquals(".my-snapshot",FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved",FSImageFormat.renameReservedMap.get(".reserved"));
  FSImageFormat.renameReservedMap.clear();
  // Order of -renameReserved and -clusterid must not matter.
  opt=NameNode.parseArguments(new String[]{"-upgrade","-renameReserved",".reserved=.my-reserved,.snapshot=.my-snapshot","-clusterid","mycid"});
  assertEquals(StartupOption.UPGRADE,opt);
  assertEquals("mycid",opt.getClusterId());
  assertEquals(".my-snapshot",FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved",FSImageFormat.renameReservedMap.get(".reserved"));
  // Bare -renameReserved falls back to the default generated names.
  opt=NameNode.parseArguments(new String[]{"-upgrade","-renameReserved"});
  assertEquals(StartupOption.UPGRADE,opt);
  assertEquals(".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".UPGRADE_RENAMED",FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".UPGRADE_RENAMED",FSImageFormat.renameReservedMap.get(".reserved"));
  // Each invalid spec must throw; the original try blocks lacked fail(),
  // so the test silently passed even if parsing stopped rejecting them.
  try {
    opt=NameNode.parseArguments(new String[]{"-upgrade","-renameReserved",".reserved=.my-reserved,.not-reserved=.my-not-reserved"});
    fail("Expected IllegalArgumentException for an unknown reserved path");
  }
  catch ( IllegalArgumentException e) {
    assertExceptionContains("Unknown reserved path",e);
  }
  try {
    opt=NameNode.parseArguments(new String[]{"-upgrade","-renameReserved",".reserved=.my-reserved,.snapshot=.snapshot"});
    fail("Expected IllegalArgumentException for renaming a path to itself");
  }
  catch ( IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path",e);
  }
  try {
    opt=NameNode.parseArguments(new String[]{"-upgrade","-renameReserved",".snapshot=.reserved"});
    fail("Expected IllegalArgumentException for renaming onto a reserved path");
  }
  catch ( IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path",e);
  }
  // An unrecognized flag after -upgrade yields no option at all.
  opt=NameNode.parseArguments(new String[]{"-upgrade","-cid"});
  assertNull(opt);
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that hasAvailableDiskSpace returns true if disk usage is below
 * threshold.
 */
@Test public void testCheckAvailability() throws IOException {
  // With zero reserved space, any volume with free capacity is available.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,0);
  NameNodeResourceChecker checker=new NameNodeResourceChecker(conf);
  assertTrue("isResourceAvailable must return true if " + "disk usage is lower than threshold",checker.hasAvailableDiskSpace());
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the NN is considered to be out of resources only once all
 * redundant configured volumes are low on resources, or when any required
 * volume is low on resources.
 */
@Test public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
  Configuration conf=new Configuration();
  File nameDir1=new File(BASE_DIR,"name-dir1");
  File nameDir2=new File(BASE_DIR,"name-dir2");
  nameDir1.mkdirs();
  nameDir2.mkdirs();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
  // Require at least two healthy redundant volumes.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,2);
  NameNodeResourceChecker nnrc=new NameNodeResourceChecker(conf);
  // Five mocked volumes, all healthy to start: indexes 0-2 are redundant,
  // indexes 3-4 are required.
  CheckedVolume[] mocks=new CheckedVolume[5];
  Map volumes=new HashMap();
  for (int i=0; i < mocks.length; i++) {
    mocks[i]=Mockito.mock(CheckedVolume.class);
    Mockito.when(mocks[i].isResourceAvailable()).thenReturn(true);
    volumes.put("volume" + (i + 1),mocks[i]);
  }
  Mockito.when(mocks[3].isRequired()).thenReturn(true);
  Mockito.when(mocks[4].isRequired()).thenReturn(true);
  nnrc.setVolumes(volumes);
  assertTrue(nnrc.hasAvailableDiskSpace());
  // One redundant volume low: two healthy redundant volumes remain, which
  // still meets the minimum of two.
  Mockito.when(mocks[0].isResourceAvailable()).thenReturn(false);
  assertTrue(nnrc.hasAvailableDiskSpace());
  // A second redundant volume low: only one healthy remains -> unavailable.
  Mockito.when(mocks[1].isResourceAvailable()).thenReturn(false);
  assertFalse(nnrc.hasAvailableDiskSpace());
  // Lowering the minimum to one makes that single healthy volume enough.
  nnrc.setMinimumReduntdantVolumes(1);
  assertTrue(nnrc.hasAvailableDiskSpace());
  // All redundant volumes low -> unavailable regardless of the minimum.
  Mockito.when(mocks[2].isResourceAvailable()).thenReturn(false);
  assertFalse(nnrc.hasAvailableDiskSpace());
  // A single low *required* volume makes the NN unavailable on its own,
  // even with a healthy redundant volume present.
  Mockito.when(mocks[2].isResourceAvailable()).thenReturn(true);
  Mockito.when(mocks[3].isResourceAvailable()).thenReturn(false);
  assertFalse(nnrc.hasAvailableDiskSpace());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that hasAvailableDiskSpace returns false if disk usage is above
 * threshold.
 */
@Test public void testCheckAvailabilityNeg() throws IOException {
  // Reserving Long.MAX_VALUE guarantees every volume is over threshold.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,Long.MAX_VALUE);
  NameNodeResourceChecker checker=new NameNodeResourceChecker(conf);
  assertFalse("isResourceAvailable must return false if " + "disk usage is higher than threshold",checker.hasAvailableDiskSpace());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode
 * when resources are low.
 */
@Test public void testCheckThatNameNodeResourceMonitorIsRunning() throws IOException, InterruptedException {
  MiniDFSCluster cluster=null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir.getAbsolutePath());
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,1);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    // Swap in a mock checker so the test can flip resource availability.
    NameNodeResourceChecker mockResourceChecker=Mockito.mock(NameNodeResourceChecker.class);
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
    cluster.getNameNode().getNamesystem().nnResourceChecker=mockResourceChecker;
    cluster.waitActive();
    // The monitor runs as a thread whose name starts with its class name;
    // scan all live threads for it.
    String monitorName=NameNodeResourceMonitor.class.getName();
    boolean monitorRunning=false;
    for ( Thread t : Thread.getAllStackTraces().keySet()) {
      if (t.toString().startsWith("Thread[" + monitorName)) {
        monitorRunning=true;
        break;
      }
    }
    assertTrue("NN resource monitor should be running",monitorRunning);
    assertFalse("NN should not presently be in safe mode",cluster.getNameNode().isInSafeMode());
    // Report low resources, then wait up to a minute for the monitor to
    // push the NN into safe mode.
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);
    final long deadline=Time.now() + (60 * 1000);
    while (!cluster.getNameNode().isInSafeMode() && Time.now() < deadline) {
      Thread.sleep(1000);
    }
    assertTrue("NN should be in safe mode after resources crossed threshold",cluster.getNameNode().isInSafeMode());
  }
  finally {
    if (cluster != null) cluster.shutdown();
  }
}
BooleanVerifier
/**
 * Single required resource: availability tracks the health of that one volume.
 * NOTE(review): testResourceScenario is declared outside this chunk; comments
 * assume its arguments are (redundant, required, failedRedundant,
 * failedRequired, minRedundant) -- confirm against its declaration.
 */
@Test public void testSingleRequiredResource(){
// One healthy required volume, no failures => resources available.
assertTrue(testResourceScenario(0,1,0,0,0));
// The lone required volume has failed => resources unavailable.
assertFalse(testResourceScenario(0,1,0,1,0));
}
BooleanVerifier
/**
 * Single redundant resource with a minimum of one redundant volume required.
 * NOTE(review): argument meaning assumed to be (redundant, required,
 * failedRedundant, failedRequired, minRedundant) -- confirm.
 */
@Test public void testSingleRedundantResource(){
// One healthy redundant volume meets the minimum of 1.
assertTrue(testResourceScenario(1,0,0,0,1));
// The only redundant volume failed, so the minimum cannot be met.
assertFalse(testResourceScenario(1,0,1,0,1));
}
BooleanVerifier
/**
 * Mix of redundant and required resources: ANY failed required volume makes
 * resources unavailable, while redundant volumes may fail down to the minimum.
 * NOTE(review): argument meaning assumed to be (redundant, required,
 * failedRedundant, failedRequired, minRedundant) -- confirm.
 */
@Test public void testRedundantWithRequiredResources(){
// All healthy => available.
assertTrue(testResourceScenario(2,2,0,0,1));
// One of two redundant failed; minimum of 1 still met => available.
assertTrue(testResourceScenario(2,2,1,0,1));
// Both redundant failed; below the minimum => unavailable.
assertFalse(testResourceScenario(2,2,2,0,1));
// Any required-volume failure => unavailable, regardless of redundant health.
assertFalse(testResourceScenario(2,2,0,1,1));
assertFalse(testResourceScenario(2,2,1,1,1));
assertFalse(testResourceScenario(2,2,2,1,1));
}
BooleanVerifier
/**
 * Multiple required resources: a single failed required volume is enough to
 * make resources unavailable.
 * NOTE(review): argument meaning assumed to be (redundant, required,
 * failedRedundant, failedRequired, minRedundant) -- confirm.
 */
@Test public void testMultipleRequiredResources(){
// All three required volumes healthy => available.
assertTrue(testResourceScenario(0,3,0,0,0));
// One, two, or all three required volumes failed => unavailable.
assertFalse(testResourceScenario(0,3,0,1,0));
assertFalse(testResourceScenario(0,3,0,2,0));
assertFalse(testResourceScenario(0,3,0,3,0));
}
BooleanVerifier
/**
 * Multiple redundant resources: availability requires at least
 * {@code minRedundant} of the redundant volumes to remain healthy.
 * NOTE(review): argument meaning assumed to be (redundant, required,
 * failedRedundant, failedRequired, minRedundant) -- confirm.
 */
@Test public void testMultipleRedundantResources(){
// 4 healthy of 4, minimum 4 => exactly at the minimum, available.
assertTrue(testResourceScenario(4,0,0,0,4));
// 3 healthy of 4, minimum 4 => below the minimum, unavailable.
assertFalse(testResourceScenario(4,0,1,0,4));
// Each pair below: healthy count equal to the minimum passes,
// one more failure drops below the minimum and fails.
assertTrue(testResourceScenario(4,0,1,0,3));
assertFalse(testResourceScenario(4,0,2,0,3));
assertTrue(testResourceScenario(4,0,2,0,2));
assertFalse(testResourceScenario(4,0,3,0,2));
assertTrue(testResourceScenario(4,0,3,0,1));
assertFalse(testResourceScenario(4,0,4,0,1));
// Minimum (2) exceeds the total number of redundant volumes (1) => unavailable.
assertFalse(testResourceScenario(1,0,0,0,2));
}
InternalCallVerifier BooleanVerifier
/**
 * Checks NameNode HTTP server bind behavior: without
 * DFS_NAMENODE_HTTP_BIND_HOST_KEY the server binds the configured localhost
 * address; with the key set to the wildcard address it binds the wildcard.
 */
@Test(timeout=300000) public void testHttpBindHostKey() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
LOG.info("Testing without " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
try {
// Phase 1: only the HTTP address is set; no bind-host override.
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpAddress().toString();
assertFalse("HTTP Bind address not expected to be wildcard by default.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
// Null out so the second phase's finally doesn't double-shutdown.
cluster=null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
// Phase 2: explicitly request wildcard binding.
conf.set(DFS_NAMENODE_HTTP_BIND_HOST_KEY,WILDCARD_ADDRESS);
try {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpAddress().toString();
assertTrue("HTTP Bind address " + address + " is not wildcard.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * HTTPS test is different since we need to setup SSL configuration.
 * NN also binds the wildcard address for HTTPS port by default so we must
 * pick a different host/port combination.
 * @throws Exception
 */
@Test(timeout=300000) public void testHttpsBindHostKey() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
LOG.info("Testing behavior without " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
// SSL keystores must exist before the HTTPS server can start.
setupSsl();
conf.set(DFS_HTTP_POLICY_KEY,HttpConfig.Policy.HTTPS_ONLY.name());
try {
// Phase 1: only the HTTPS address is set; no bind-host override.
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpsAddress().toString();
// NOTE(review): the messages say "HTTP" although this is the HTTPS test.
assertFalse("HTTP Bind address not expected to be wildcard by default.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
// Null out so the second phase's finally doesn't double-shutdown.
cluster=null;
}
}
LOG.info("Testing behavior with " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
// Phase 2: explicitly request wildcard binding for HTTPS.
conf.set(DFS_NAMENODE_HTTPS_BIND_HOST_KEY,WILDCARD_ADDRESS);
try {
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpsAddress().toString();
assertTrue("HTTP Bind address " + address + " is not wildcard.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies storage-capacity accounting when dfs.datanode.du.reserved is set:
 * per-datanode capacity must equal used + remaining + non-DFS used, the
 * percentage getters must agree exactly with DFSUtil's formulas, and the
 * cluster-wide capacity reported by the namesystem must equal the raw disk
 * capacity minus the reserved space.
 * (The previous javadoc described creating and updating a file, which this
 * test does not do.)
 */
@Test public void testVolumeSize() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
long reserved=10000;
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,reserved);
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final FSNamesystem namesystem=cluster.getNamesystem();
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
// Generified lists (the originals used raw types).
final List<DatanodeDescriptor> live=new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead=new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live,dead,false);
// The default builder starts exactly one datanode.
assertEquals(1,live.size());
long used, remaining, configCapacity, nonDFSUsed, bpUsed;
float percentUsed, percentRemaining, percentBpUsed;
for ( final DatanodeDescriptor datanode : live) {
used=datanode.getDfsUsed();
remaining=datanode.getRemaining();
nonDFSUsed=datanode.getNonDfsUsed();
configCapacity=datanode.getCapacity();
percentUsed=datanode.getDfsUsedPercent();
percentRemaining=datanode.getRemainingPercent();
bpUsed=datanode.getBlockPoolUsed();
percentBpUsed=datanode.getBlockPoolUsedPercent();
LOG.info("Datanode configCapacity " + configCapacity + " used "+ used+ " non DFS used "+ nonDFSUsed+ " remaining "+ remaining+ " perentUsed "+ percentUsed+ " percentRemaining "+ percentRemaining);
// Capacity must be fully accounted for by its three components.
// assertEquals gives a diagnostic message on failure, unlike assertTrue(a==b).
assertEquals(used + remaining + nonDFSUsed,configCapacity);
// Percentage getters must agree exactly (delta 0) with DFSUtil's formulas.
assertEquals(DFSUtil.getPercentUsed(used,configCapacity),percentUsed,0);
assertEquals(DFSUtil.getPercentRemaining(remaining,configCapacity),percentRemaining,0);
assertEquals(DFSUtil.getPercentUsed(bpUsed,configCapacity),percentBpUsed,0);
}
DF df=new DF(new File(cluster.getDataDirectory()),conf);
// Each datanode uses two data directories, so scale the raw DF numbers.
int numOfDataDirs=2;
long diskCapacity=numOfDataDirs * df.getCapacity();
reserved*=numOfDataDirs;
configCapacity=namesystem.getCapacityTotal();
used=namesystem.getCapacityUsed();
nonDFSUsed=namesystem.getNonDfsUsedSpace();
remaining=namesystem.getCapacityRemaining();
percentUsed=namesystem.getPercentUsed();
percentRemaining=namesystem.getPercentRemaining();
bpUsed=namesystem.getBlockPoolUsedSpace();
percentBpUsed=namesystem.getPercentBlockPoolUsed();
LOG.info("Data node directory " + cluster.getDataDirectory());
LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "+ configCapacity+ " reserved "+ reserved+ " used "+ used+ " remaining "+ remaining+ " nonDFSUsed "+ nonDFSUsed+ " remaining "+ remaining+ " percentUsed "+ percentUsed+ " percentRemaining "+ percentRemaining+ " bpUsed "+ bpUsed+ " percentBpUsed "+ percentBpUsed);
// Cluster capacity = raw disk capacity minus the configured reserve.
assertEquals(diskCapacity - reserved,configCapacity);
assertEquals(used + remaining + nonDFSUsed,configCapacity);
assertEquals(DFSUtil.getPercentUsed(used,configCapacity),percentUsed,0);
assertEquals(DFSUtil.getPercentUsed(bpUsed,configCapacity),percentBpUsed,0);
assertEquals(((float)remaining * 100.0f) / (float)configCapacity,percentRemaining,0);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After running a set of operations, restart the NN and check that the retry
 * cache has been rebuilt from the editlog.
 */
@Test public void testRetryCacheRebuild() throws Exception {
DFSTestUtil.runOperations(cluster,filesystem,conf,BlockSize,0);
LightWeightCache cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
// 23 = number of cache-populating operations issued by runOperations above
// -- TODO confirm against DFSTestUtil.runOperations.
assertEquals(23,cacheSet.size());
// Snapshot the current cache entries for comparison after the restart.
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Restart the NN; the retry cache must be rebuilt from the persisted editlog.
cluster.restartNameNode();
cluster.waitActive();
namesystem=cluster.getNamesystem();
assertTrue(namesystem.hasRetryCache());
cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every rebuilt entry must match one recorded before the restart.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests retry-cache behavior of the delete call: repeats under the same call
 * id report the cached success, while a genuinely new call on the
 * already-deleted dir returns false.
 */
@Test public void testDelete() throws Exception {
String dir="/testNamenodeRetryCache/testDelete";
// Create the directory under a fresh call id.
newCall();
namesystem.mkdirs(dir,perm,true);
// Delete, then repeat twice WITHOUT newCall(): all three report success
// (the repeats are presumably served from the retry cache -- confirm).
newCall();
Assert.assertTrue(namesystem.delete(dir,false));
Assert.assertTrue(namesystem.delete(dir,false));
Assert.assertTrue(namesystem.delete(dir,false));
// A new call id sees the directory already gone and fails.
newCall();
Assert.assertFalse(namesystem.delete(dir,false));
}
InternalCallVerifier BooleanVerifier
/**
 * Tests retry-cache behavior of the (deprecated) renameTo call: repeats under
 * the same call id report the cached success, while a new call on the
 * already-moved source returns false.
 */
@SuppressWarnings("deprecation") @Test public void testRename1() throws Exception {
String src="/testNamenodeRetryCache/testRename1/src";
String target="/testNamenodeRetryCache/testRename1/target";
// Create the source directory under a reset call id.
resetCall();
namesystem.mkdirs(src,perm,true);
// Rename, then repeat twice WITHOUT newCall(): all three report success
// (the repeats are presumably served from the retry cache -- confirm).
newCall();
Assert.assertTrue(namesystem.renameTo(src,target));
Assert.assertTrue(namesystem.renameTo(src,target));
Assert.assertTrue(namesystem.renameTo(src,target));
// A new call id sees the source already moved and fails.
newCall();
Assert.assertFalse(namesystem.renameTo(src,target));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Checks that DFS remains in proper condition after a restart: files survive,
 * modification times are preserved, and owner/group changes made before the
 * restart are still visible afterwards. Also verifies that modifying the
 * namespace and saving it changes the fsimage contents.
 */
@Test public void testRestartDFS() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FSNamesystem fsn=null;
int numNamenodeDirs;
DFSTestUtil files=new DFSTestUtil.Builder().setName("TestRestartDFS").setNumFiles(200).build();
final String dir="/srcdat";
final Path rootpath=new Path("/");
final Path dirpath=new Path(dir);
long rootmtime;
FileStatus rootstatus;
FileStatus dirstatus;
try {
// Phase 1: format, populate, and record pre-restart metadata.
cluster=new MiniDFSCluster.Builder(conf).format(true).numDataNodes(NUM_DATANODES).build();
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,new String[]{});
numNamenodeDirs=nameNodeDirs.length;
assertTrue("failed to get number of Namenode StorageDirs",numNamenodeDirs != 0);
FileSystem fs=cluster.getFileSystem();
files.createFiles(fs,dir);
rootmtime=fs.getFileStatus(rootpath).getModificationTime();
// BUG FIX: rootstatus was taken from dirpath, so the owner/group
// assertions below compared "/" against "/srcdat" metadata.
rootstatus=fs.getFileStatus(rootpath);
dirstatus=fs.getFileStatus(dirpath);
// Mutate owner of "/" and group of "/srcdat" so the restart must
// preserve the changes.
fs.setOwner(rootpath,rootstatus.getOwner() + "_XXX",null);
fs.setOwner(dirpath,null,dirstatus.getGroup() + "_XXX");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
try {
// Phase 2: restart WITHOUT formatting and verify everything survived.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
fsn=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",files.checkFiles(fs,dir));
final FileStatus newrootstatus=fs.getFileStatus(rootpath);
assertEquals(rootmtime,newrootstatus.getModificationTime());
assertEquals(rootstatus.getOwner() + "_XXX",newrootstatus.getOwner());
assertEquals(rootstatus.getGroup(),newrootstatus.getGroup());
final FileStatus newdirstatus=fs.getFileStatus(dirpath);
assertEquals(dirstatus.getOwner(),newdirstatus.getOwner());
assertEquals(dirstatus.getGroup() + "_XXX",newdirstatus.getGroup());
// (Removed a dead reassignment of rootmtime here; the value was never
// read again.)
final String checkAfterRestart=checkImages(fsn,numNamenodeDirs);
// Modify the namespace and force a saveNamespace: the image digest
// must change.
files.cleanup(fs,dir);
files.createFiles(fs,dir);
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
final String checkAfterModify=checkImages(fsn,numNamenodeDirs);
assertFalse("Modified namespace should change fsimage contents. " + "was: " + checkAfterRestart + " now: "+ checkAfterModify,checkAfterRestart.equals(checkAfterModify));
fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
files.cleanup(fs,dir);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify that a saveNamespace command brings faulty directories
 * in fs.name.dir and fs.edit.dir back online.
 */
@Test(timeout=30000) public void testReinsertnamedirsInSavenamespace() throws Exception {
Configuration conf=getConf();
// Enable automatic restoration of failed name dirs.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true);
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn=FSNamesystem.loadFromDisk(conf);
FSImage originalImage=fsn.getFSImage();
NNStorage storage=originalImage.getStorage();
// Spy on the FSImage so it can be swapped into the namesystem.
FSImage spyImage=spy(originalImage);
Whitebox.setInternalState(fsn,"fsImage",spyImage);
FileSystem fs=FileSystem.getLocal(conf);
File rootDir=storage.getStorageDir(0).getRoot();
Path rootPath=new Path(rootDir.getPath(),"current");
final FsPermission permissionNone=new FsPermission((short)0);
final FsPermission permissionAll=new FsPermission(FsAction.ALL,FsAction.READ_EXECUTE,FsAction.READ_EXECUTE);
// Make the first storage dir unwritable to simulate a failed directory.
fs.setPermission(rootPath,permissionNone);
try {
doAnEdit(fsn,1);
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
// First save: the unwritable dir must be marked bad.
LOG.info("Doing the first savenamespace.");
fsn.saveNamespace();
LOG.info("First savenamespace sucessful.");
assertTrue("Savenamespace should have marked one directory as bad." + " But found " + storage.getRemovedStorageDirs().size() + " bad directories.",storage.getRemovedStorageDirs().size() == 1);
// Restore permissions; the second save must bring the dir back online.
fs.setPermission(rootPath,permissionAll);
LOG.info("Doing the second savenamespace.");
fsn.saveNamespace();
LOG.warn("Second savenamespace sucessful.");
assertTrue("Savenamespace should have been successful in removing " + " bad directories from Image." + " But found " + storage.getRemovedStorageDirs().size() + " bad directories.",storage.getRemovedStorageDirs().size() == 0);
// Reload from disk and confirm the edit survived the save cycle.
LOG.info("Shutting down fsimage.");
originalImage.close();
fsn.close();
fsn=null;
LOG.info("Loading new FSmage from disk.");
fsn=FSNamesystem.loadFromDisk(conf);
LOG.info("Checking reloaded image.");
checkEditExists(fsn,1);
LOG.info("Reloaded image is good.");
}
finally {
// Always restore permissions so later tests / cleanup can delete the dir.
if (rootDir.exists()) {
fs.setPermission(rootPath,permissionAll);
}
if (fsn != null) {
try {
fsn.close();
}
catch ( Throwable t) {
LOG.fatal("Failed to shut down",t);
}
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file while modifying the file after the snapshot was taken.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterModification() throws Exception {
// Resolve the live (non-snapshot) path of file1 and record its mtime.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
final long modTime=inodes[inodes.length - 1].getModificationTime();
// Snapshot sub1 as "s3", then append to file1 so the live copy diverges.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s3");
DFSTestUtil.appendFile(hdfs,file1,"the content for appending");
// Resolve the snapshot copy: its mtime must still be the pre-append value.
String snapshotPath=sub1.toString() + "/.snapshot/s3/file1";
names=INode.getPathNames(snapshotPath);
components=INode.getPathComponents(names);
INodesInPath ssNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] ssInodes=ssNodesInPath.getINodes();
// One fewer inode than components: the ".snapshot" component presumably
// does not yield an inode of its own -- confirm in INodesInPath.resolve.
assertEquals(ssInodes.length,components.length - 1);
final Snapshot s3=getSnapshot(ssNodesInPath,"s3");
assertSnapshot(ssNodesInPath,true,s3,3);
INode snapshotFileNode=ssInodes[ssInodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(),file1.getName());
assertTrue(snapshotFileNode.asFile().isWithSnapshot());
assertEquals(modTime,snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
// Resolve the live path again: same inode count, but a newer mtime.
names=INode.getPathNames(file1.toString());
components=INode.getPathComponents(names);
INodesInPath newNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
assertSnapshot(newNodesInPath,false,s3,-1);
INode[] newInodes=newNodesInPath.getINodes();
assertEquals(newInodes.length,components.length);
final int last=components.length - 1;
assertEquals(newInodes[last].getFullPathName(),file1.toString());
Assert.assertFalse(modTime == newInodes[last].getModificationTime());
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s3");
hdfs.disallowSnapshot(sub1);
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises allowSnapshot/disallowSnapshot and checks that the directory's
 * snapshottable flag toggles accordingly.
 */
@Test(timeout=15000) public void testAllowSnapshot() throws Exception {
final String dirStr=sub1.toString();
final Path dirPath=new Path(dirStr);
// Initially the directory must not be snapshottable.
Assert.assertFalse(fsdir.getINode(dirStr).asDirectory().isSnapshottable());
// Allowing snapshots turns the flag on.
hdfs.allowSnapshot(dirPath);
final INode afterAllow=fsdir.getINode(dirStr);
Assert.assertTrue(afterAllow.asDirectory().isSnapshottable());
// Disallowing snapshots turns it back off.
hdfs.disallowSnapshot(dirPath);
final INode afterDisallow=fsdir.getINode(dirStr);
Assert.assertFalse(afterDisallow.asDirectory().isSnapshottable());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for normal (non-snapshot) file.
 */
@Test(timeout=15000) public void testNonSnapshotPathINodes() throws Exception {
// Full resolution: one inode per path component, no snapshot involved.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertSnapshot(nodesInPath,false,null,-1);
assertTrue("file1=" + file1 + ", nodesInPath="+ nodesInPath,inodes[components.length - 1] != null);
// The trailing inodes correspond to file1, its parent sub1, and dir.
assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Partial resolution limited to 1 inode: only the last path inode is kept.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,1);
assertSnapshot(nodesInPath,false,null,-1);
assertEquals(inodes[0].getFullPathName(),file1.toString());
// Partial resolution limited to 2 inodes: the last two path inodes.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,2);
assertSnapshot(nodesInPath,false,null,-1);
assertEquals(inodes[1].getFullPathName(),file1.toString());
assertEquals(inodes[0].getFullPathName(),sub1.toString());
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file.
 */
@Test(timeout=15000) public void testSnapshotPathINodes() throws Exception {
// Take snapshot "s1" of sub1, then resolve file1 through the snapshot path.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s1");
String snapshotPath=sub1.toString() + "/.snapshot/s1/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// One fewer inode than components: the ".snapshot" component presumably
// does not yield an inode of its own -- confirm in INodesInPath.resolve.
assertEquals(inodes.length,components.length - 1);
final Snapshot snapshot=getSnapshot(nodesInPath,"s1");
assertSnapshot(nodesInPath,true,snapshot,3);
INode snapshotFileNode=inodes[inodes.length - 1];
assertINodeFile(snapshotFileNode,file1);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Partial resolution limited to 1 inode.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,1);
assertSnapshot(nodesInPath,true,snapshot,-1);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Partial resolution limited to 2 inodes.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,2);
assertSnapshot(nodesInPath,true,snapshot,0);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Resolving the bare ".snapshot" path lands on sub1 itself, not a file.
String dotSnapshotPath=sub1.toString() + "/.snapshot";
names=INode.getPathNames(dotSnapshotPath);
components=INode.getPathComponents(names);
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length - 1);
assertSnapshot(nodesInPath,true,snapshot,-1);
final INode last=nodesInPath.getLastINode();
assertEquals(last.getFullPathName(),sub1.toString());
assertFalse(last instanceof INodeFile);
// Every prefix of a nonexistent path containing ".snapshot" must 404.
String[] invalidPathComponent={"invalidDir","foo",".snapshot","bar"};
Path invalidPath=new Path(invalidPathComponent[0]);
for (int i=1; i < invalidPathComponent.length; i++) {
invalidPath=new Path(invalidPath,invalidPathComponent[i]);
try {
hdfs.getFileStatus(invalidPath);
Assert.fail();
}
catch ( FileNotFoundException fnfe) {
System.out.println("The exception is expected: " + fnfe);
}
}
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s1");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file after deleting the original file.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterDeletion() throws Exception {
// Snapshot sub1 as "s2", then delete the live copy of file1.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s2");
hdfs.delete(file1,false);
final Snapshot snapshot;
{
// The snapshot copy must still resolve after the live file is gone.
String snapshotPath=sub1.toString() + "/.snapshot/s2/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// One fewer inode than components: the ".snapshot" component presumably
// does not yield an inode of its own -- confirm in INodesInPath.resolve.
assertEquals(inodes.length,components.length - 1);
snapshot=getSnapshot(nodesInPath,"s2");
assertSnapshot(nodesInPath,true,snapshot,3);
final INode inode=inodes[inodes.length - 1];
assertEquals(file1.getName(),inode.getLocalName());
assertTrue(inode.asFile().isWithSnapshot());
}
// The live path resolves with a null last inode (file deleted); the
// ancestors sub1 and dir are still present.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertEquals(nodesInPath.getNumNonNull(),components.length - 1);
assertSnapshot(nodesInPath,false,snapshot,-1);
assertNull(inodes[components.length - 1]);
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s2");
hdfs.disallowSnapshot(sub1);
}
InternalCallVerifier BooleanVerifier
/**
 * Tests fsimage compression round-trips: write an uncompressed image, then
 * reload/save it compressed (default codec), compressed with a different
 * codec (Gzip), and uncompressed again, checking the namespace each time
 * via checkNameSpace.
 */
@Test public void testCompression() throws IOException {
LOG.info("Test compressing image.");
Configuration conf=new Configuration();
FileSystem.setDefaultUri(conf,"hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,"127.0.0.1:0");
File base_dir=new File(PathUtils.getTestDir(getClass()),"dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,new File(base_dir,"name").getPath());
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
DFSTestUtil.formatNameNode(conf);
// Start a NN, create /test, and save an uncompressed image.
LOG.info("Create an uncompressed fsimage");
NameNode namenode=new NameNode(conf);
namenode.getNamesystem().mkdirs("/test",new PermissionStatus("hairong",null,FsPermission.getDefault()),true);
NamenodeProtocols nnRpc=namenode.getRpcServer();
assertTrue(nnRpc.getFileInfo("/test").isDir());
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
nnRpc.saveNamespace();
namenode.stop();
namenode.join();
// Typos fixed in the log messages below ("uncomressed", "iamge").
LOG.info("Read an uncompressed image and store it compressed using default codec.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,true);
checkNameSpace(conf);
LOG.info("Read a compressed image and store it using a different codec.");
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,"org.apache.hadoop.io.compress.GzipCodec");
checkNameSpace(conf);
LOG.info("Read a compressed image and store it as uncompressed.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
checkNameSpace(conf);
LOG.info("Read an uncompressed image and store it as uncompressed.");
checkNameSpace(conf);
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that the hosts include list may contain host names: after a namenode
 * restart, datanodes that are still alive should have no trouble getting
 * registered again.
 */
@Test public void testNNRestart() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
FileSystem localFileSys;
Path hostsFile;
Path excludeFile;
// Heartbeat interval in seconds; used when polling for live nodes below.
int HEARTBEAT_INTERVAL=1;
localFileSys=FileSystem.getLocal(config);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/work-dir/restartnn");
hostsFile=new Path(dir,"hosts");
excludeFile=new Path(dir,"exclude");
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
writeConfigFile(localFileSys,excludeFile,null);
config.set(DFSConfigKeys.DFS_HOSTS,hostsFile.toUri().getPath());
// Resolve 127.0.0.1 to a host NAME and write that into the include file
// (generified list; the original used a raw ArrayList).
ArrayList<String> list=new ArrayList<String>();
byte[] b={127,0,0,1};
InetAddress inetAddress=InetAddress.getByAddress(b);
list.add(inetAddress.getHostName());
writeConfigFile(localFileSys,hostsFile,list);
int numDatanodes=1;
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
cluster.waitActive();
cluster.restartNameNode();
NamenodeProtocols nn=cluster.getNameNodeRpc();
assertNotNull(nn);
assertTrue(cluster.isDataNodeUp());
// Poll for up to 5 heartbeats for the datanode to re-register.
DatanodeInfo[] info=nn.getDatanodeReport(DatanodeReportType.LIVE);
for (int i=0; i < 5 && info.length != numDatanodes; i++) {
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
}
assertEquals("Number of live nodes should be " + numDatanodes,numDatanodes,info.length);
}
catch ( IOException e) {
fail(StringUtils.stringifyException(e));
throw e;
}
finally {
cleanupFile(localFileSys,excludeFile.getParent());
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests the upgrade from version 0.20.204 to the Federation version for the
 * case without a clusterid (-upgrade): a clusterid is expected to be
 * generated automatically.
 * @throws Exception
 */
@Test public void testStartupOptUpgradeFrom204() throws Exception {
// Simulate storage laid out by release 0.20.204.
layoutVersion=Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
// Auto-generated cluster ids carry the "CID" prefix.
assertTrue("Clusterid should start with CID",storage.getClusterID().startsWith("CID"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test dfsadmin -restoreFailedStorage command: the flag defaults to true,
 * can be set false and true again via the CLI, and "check" reports the
 * current value without changing it.
 * @throws Exception
 */
@Test public void testDfsAdminCmd() throws Exception {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(2).manageNameDfsDirs(false).build();
cluster.waitActive();
try {
FSImage fsi=cluster.getNameNode().getFSImage();
boolean restore=fsi.getStorage().getRestoreFailedStorage();
LOG.info("Restore is " + restore);
// restoreFailedStorage is expected to be enabled by default.
assertTrue("restoreFailedStorage should default to true",restore);
String cmd="-fs NAMENODE -restoreFailedStorage false";
String namenode=config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
CommandExecutor executor=new CLITestCmdDFS(cmd,new CLICommandDFSAdmin()).getExecutor(namenode);
// Disable, re-enable, then query via "check".
executor.executeCommand(cmd);
restore=fsi.getStorage().getRestoreFailedStorage();
assertFalse("After set true call restore is " + restore,restore);
cmd="-fs NAMENODE -restoreFailedStorage true";
executor.executeCommand(cmd);
restore=fsi.getStorage().getRestoreFailedStorage();
assertTrue("After set false call restore is " + restore,restore);
cmd="-fs NAMENODE -restoreFailedStorage check";
CommandExecutor.Result cmdResult=executor.executeCommand(cmd);
restore=fsi.getStorage().getRestoreFailedStorage();
assertTrue("After check call restore is " + restore,restore);
// BUG FIX: String.trim() returns a new string; the original discarded
// the result, leaving commandOutput untrimmed.
String commandOutput=cmdResult.getCommandOutput().trim();
assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * 1. create DFS cluster with 3 storage directories
 * - 2 EDITS_IMAGE(name1, name2), 1 EDITS(name3)
 * 2. create a file
 * 3. corrupt/disable name2 and name3 by removing rwx permission
 * 4. run doCheckpoint
 * - will fail on removed dirs (which invalidates them)
 * 5. write another file
 * 6. check there is only one healthy storage dir
 * 7. run doCheckpoint - recover should fail but checkpoint should succeed
 * 8. check there is still only one healthy storage dir
 * 9. restore the access permission for name2 and name 3, run checkpoint again
 * 10.verify there are 3 healthy storage dirs.
 */
@Test public void testStorageRestoreFailure() throws Exception {
SecondaryNameNode secondary=null;
// On Windows the permission change must target the "current" subdirectory.
String nameDir2=Shell.WINDOWS ? (new File(path2,"current").getAbsolutePath()) : path2.toString();
String nameDir3=Shell.WINDOWS ? (new File(path3,"current").getAbsolutePath()) : path3.toString();
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(0).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=new SecondaryNameNode(config);
printStorages(cluster.getNameNode().getFSImage());
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/","test");
assertTrue(fs.mkdirs(path));
// Revoke all permissions on name2 and name3 to simulate failed storage.
assertEquals(0,FileUtil.chmod(nameDir2,"000"));
assertEquals(0,FileUtil.chmod(nameDir3,"000"));
// This checkpoint fails on the unreadable dirs and invalidates them.
secondary.doCheckpoint();
printStorages(cluster.getNameNode().getFSImage());
path=new Path("/","test1");
assertTrue(fs.mkdirs(path));
// BUG FIX: these checks used bare `assert` statements, which are no-ops
// unless the JVM runs with -ea; JUnit assertions always execute.
assertEquals(1,cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
secondary.doCheckpoint();
assertEquals(1,cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
// Restore permissions; the next checkpoint should bring the dirs back.
assertEquals(0,FileUtil.chmod(nameDir2,"755"));
assertEquals(0,FileUtil.chmod(nameDir3,"755"));
secondary.doCheckpoint();
assertEquals(3,cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
}
finally {
// Always restore permissions so cleanup can delete the directories.
if (path2.exists()) {
FileUtil.chmod(nameDir2,"755");
}
if (path3.exists()) {
FileUtil.chmod(nameDir3,"755");
}
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
* Test to simulate interleaved checkpointing by 2 2NNs after a storage
* directory has been taken offline. The first will cause the directory to
* come back online, but it won't have any valid contents. The second 2NN will
* then try to perform a checkpoint. The NN should not serve up the image or
* edits from the restored (empty) dir.
*/
@Test public void testMultipleSecondaryCheckpoint() throws IOException {
SecondaryNameNode secondary=null;
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=new SecondaryNameNode(config);
FSImage fsImage=cluster.getNameNode().getFSImage();
printStorages(fsImage);
FileSystem fs=cluster.getFileSystem();
Path testPath=new Path("/","test");
assertTrue(fs.mkdirs(testPath));
printStorages(fsImage);
// Take path1 offline; its contents are now invalid/empty.
invalidateStorage(fsImage,ImmutableSet.of(path1));
// Rolling the edit log simulates the first 2NN bringing the failed dir
// back online without valid contents.
cluster.getNameNodeRpc().rollEditLog();
printStorages(fsImage);
// The second 2NN checkpoints; the NN must not serve the image or edits
// from the restored (empty) directory.
secondary.doCheckpoint();
printStorages(fsImage);
assertTrue("path exists before restart",fs.exists(testPath));
secondary.shutdown();
// If the NN had trusted the empty dir, the namespace (and /test) would
// be lost across this restart.
cluster.restartNameNode();
assertTrue("path should still exist after restart",fs.exists(testPath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
* test
* 1. create DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
* 2. create a cluster and write a file
* 3. corrupt/disable one storage (or two) by removing
* 4. run doCheckpoint - it will fail on removed dirs (which
* will invalidate the storages)
* 5. write another file
* 6. check that edits and fsimage differ
* 7. run doCheckpoint
* 8. verify that all the image and edits files are the same.
*/
@Test public void testStorageRestore() throws Exception {
int numDatanodes=0;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).manageNameDfsDirs(false).build();
cluster.waitActive();
SecondaryNameNode secondary=new SecondaryNameNode(config);
System.out.println("****testStorageRestore: Cluster and SNN started");
printStorages(cluster.getNameNode().getFSImage());
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/","test");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test' created, invalidating storage...");
// Knock out path2 (EDITS_IMAGE) and path3 (EDITS); only path1 stays live.
invalidateStorage(cluster.getNameNode().getFSImage(),ImmutableSet.of(path2,path3));
printStorages(cluster.getNameNode().getFSImage());
System.out.println("****testStorageRestore: storage invalidated");
path=new Path("/","test1");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test1' created");
// The live dir (path1) received the 'test1' edit; the invalidated dirs did
// not, so its in-progress log must differ from theirs (which match each
// other, both frozen at the point of failure).
FSImageTestUtil.assertFileContentsDifferent(2,new File(path1,"current/" + getInProgressEditsFileName(1)),new File(path2,"current/" + getInProgressEditsFileName(1)),new File(path3,"current/" + getInProgressEditsFileName(1)));
FSImageTestUtil.assertFileContentsSame(new File(path2,"current/" + getInProgressEditsFileName(1)),new File(path3,"current/" + getInProgressEditsFileName(1)));
System.out.println("****testStorageRestore: checkfiles(false) run");
// The checkpoint restores the failed dirs and starts a new segment at txid 5.
secondary.doCheckpoint();
// Image dirs get the new fsimage; path3 is edits-only and must not.
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getImageFileName(4)),new File(path2,"current/" + getImageFileName(4)));
assertFalse("Should not have any image in an edits-only directory",new File(path3,"current/" + getImageFileName(4)).exists());
// Only the dir that never failed has the finalized 1-4 segment.
assertTrue("Should have finalized logs in the directory that didn't fail",new File(path1,"current/" + getFinalizedEditsFileName(1,4)).exists());
assertFalse("Should not have finalized logs in the failed directories",new File(path2,"current/" + getFinalizedEditsFileName(1,4)).exists());
assertFalse("Should not have finalized logs in the failed directories",new File(path3,"current/" + getFinalizedEditsFileName(1,4)).exists());
// After restore, all three dirs share the new in-progress segment.
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getInProgressEditsFileName(5)),new File(path2,"current/" + getInProgressEditsFileName(5)),new File(path3,"current/" + getInProgressEditsFileName(5)));
String md5BeforeEdit=FSImageTestUtil.getFileMD5(new File(path1,"current/" + getInProgressEditsFileName(5)));
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getImageFileName(0)),new File(path2,"current/" + getImageFileName(0)));
path=new Path("/","test2");
assertTrue(fs.mkdirs(path));
// A new edit must change the in-progress log in every (restored) dir.
String md5AfterEdit=FSImageTestUtil.getFileMD5(new File(path1,"current/" + getInProgressEditsFileName(5)));
assertFalse(md5BeforeEdit.equals(md5AfterEdit));
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getInProgressEditsFileName(5)),new File(path2,"current/" + getInProgressEditsFileName(5)),new File(path3,"current/" + getInProgressEditsFileName(5)));
secondary.shutdown();
cluster.shutdown();
// Shutdown finalizes segment 5-7 identically everywhere.
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getFinalizedEditsFileName(5,7)),new File(path2,"current/" + getFinalizedEditsFileName(5,7)),new File(path3,"current/" + getFinalizedEditsFileName(5,7)));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
* Regression test for HDFS-1997. Test that, if an exception
* occurs on the client side, it is properly reported as such,
* and reported to the associated NNStorage object.
*/
/**
 * Regression test for HDFS-1997: a client-side download failure must be
 * reported as an IOException AND propagated to the NNStorage via
 * reportErrorOnFile for the unusable local path.
 */
@Test public void testClientSideException() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  NNStorage mockStorage=Mockito.mock(NNStorage.class);
  // Typed List<File>: the raw type would make get(0) return Object and
  // break the reportErrorOnFile(File) verification below.
  List<File> localPath=Collections.singletonList(new File("/xxxxx-does-not-exist/blah"));
  try {
    URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL();
    String id="getimage=1&txid=0";
    // Destination dir does not exist, so the transfer must fail.
    TransferFsImage.getFileClient(fsName,id,localPath,mockStorage,false);
    fail("Didn't get an exception!");
  }
  catch ( IOException ioe) {
    // The failing file must be reported to storage for error accounting.
    Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0));
    assertTrue("Unexpected exception: " + StringUtils.stringifyException(ioe),ioe.getMessage().contains("Unable to download to any storage"));
  }
  finally {
    cluster.shutdown();
  }
}
BooleanVerifier
/**
* Similar to the above test, except that there are multiple local files
* and one of them can be saved.
*/
/**
 * Like {@link #testClientSideException()}, but with one bad local path and
 * one good one: the bad path is reported, while the good copy still succeeds.
 */
@Test public void testClientSideExceptionOnJustOneDir() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  NNStorage mockStorage=Mockito.mock(NNStorage.class);
  // Typed List<File>: the raw type would make get(1) return Object and
  // break the length() call below.
  List<File> localPaths=ImmutableList.of(new File("/xxxxx-does-not-exist/blah"),new File(TEST_DIR,"testfile"));
  try {
    URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL();
    String id="getimage=1&txid=0";
    // One destination is writable, so the call itself succeeds.
    TransferFsImage.getFileClient(fsName,id,localPaths,mockStorage,false);
    // The unusable path must still be reported to storage.
    Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
    assertTrue("The valid local file should get saved properly",localPaths.get(1).length() > 0);
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test for the case where the shared edits dir doesn't have
* all of the recent edit logs.
*/
@Test public void testSharedEditsMissingLogs() throws Exception {
removeStandbyNameDirs();
// Roll so txids 1-2 are finalized and the new segment starts at 3.
CheckpointSignature sig=nn0.getRpcServer().rollEditLog();
assertEquals(3,sig.getCurSegmentTxId());
// Delete the finalized 1-2 segment from the shared edits dir, leaving a
// gap that bootstrapStandby cannot bridge.
URI editsUri=cluster.getSharedEditsDir(0,1);
File editsDir=new File(editsUri);
File editsSegment=new File(new File(editsDir,"current"),NNStorage.getFinalizedEditsFileName(1,2));
GenericTestUtils.assertExists(editsSegment);
assertTrue(editsSegment.delete());
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class));
try {
// Bootstrapping must fail with the dedicated "logs unavailable" code.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE,rc);
}
finally {
logs.stopCapturing();
}
// And it must log a FATAL naming the missing txid range.
GenericTestUtils.assertMatches(logs.getOutput(),"FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Make sure that starting a second NN with the -upgrade flag fails if the
* other NN has already done that.
*/
@Test public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// No "previous" dirs anywhere before the upgrade begins.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 performs the upgrade while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
// Upgrade created "previous" for NN0 and the shared dir, but not NN1.
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// NN1 now attempts its own -upgrade against a shared log that is
// already mid-upgrade; this must be rejected.
cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
try {
cluster.restartNameNode(1,false);
fail("Should not have been able to start second NN with -upgrade");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Ensure that an admin cannot finalize an HA upgrade without at least one NN
* being active.
*/
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Pristine state: no "previous" dirs before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 upgrades while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby and fail over to it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
// Now make both NNs standby; finalizing must be refused with no active.
cluster.transitionToStandby(1);
try {
runFinalizeCommand(cluster);
fail("Should not have been able to finalize upgrade with no NN active");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Make sure that an HA NN with NFS-based HA can successfully start and
* upgrade.
*/
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Pristine state: no "previous" dirs anywhere.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 upgrades while NN1 is down; this should also upgrade the
// NFS-shared edits dir.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
// Namespace must remain writable across the upgrade.
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Re-bootstrap NN1 and verify a failover to it works post-upgrade.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Upgrade an HA cluster backed by JournalNodes, then finalize, verifying the
// "previous" dirs appear during the upgrade and disappear after finalization.
@Test public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// No "previous" dirs on the NNs or JNs before upgrading.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 upgrades while NN1 is down; JNs are upgraded along with it.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
assertTrue(fs.mkdirs(new Path("/foo2")));
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
// Finalizing must drop every "previous" dir (NNs and JNs).
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
IterativeVerifier BooleanVerifier
/**
* Make sure that an HA NN will start if a previous upgrade was in progress.
*/
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    // Fake an interrupted upgrade by planting a "previous.tmp" directory in
    // every name dir of both NameNodes.
    for (int nnIndex=0; nnIndex < 2; nnIndex++) {
      for ( URI nameDirUri : cluster.getNameDirs(nnIndex)) {
        File previousTmpDir=new File(new File(nameDirUri),Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + previousTmpDir);
        assertTrue(previousTmpDir.mkdirs());
      }
    }
    // Both NNs must come back up despite the leftover upgrade state.
    cluster.restartNameNodes();
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Make sure that an HA NN can successfully upgrade when configured using
* JournalNodes.
*/
@Test public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Pristine state: no "previous" dirs on JNs or NNs.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 upgrades while NN1 is down; the JNs are upgraded along with it.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Namespace must remain writable mid-upgrade.
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Re-bootstrap NN1 and verify a failover to it works post-upgrade.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test rollback with NFS shared dir.
*/
/**
 * Test rollback with an NFS shared edits dir: upgrade NN0, bootstrap NN1,
 * then roll back and verify all "previous" dirs (local and shared) are gone.
 */
@Test public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    File sharedDir=new File(cluster.getSharedEditsDir(0,1));
    // Pristine state: no "previous" dirs before the upgrade.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // NN0 upgrades while NN1 is down; the shared dir is upgraded too.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertCTimesEqual(cluster);
    // Roll back offline: point the conf at NN0's name dirs and run the
    // rollback, then verify the "previous" dirs were removed.
    // (Typed Collection<URI>; the raw type loses the element type.)
    Collection<URI> nn1NameDirs=cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf,false);
    checkNnPreviousDirExistence(cluster,0,false);
    checkPreviousDirExistence(sharedDir,false);
  }
  finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rollback with JournalNodes: upgrade NN0 (upgrading the JNs with it),
 * bootstrap NN1, then roll back and verify all "previous" dirs are gone.
 */
@Test public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster=null;
  FileSystem fs=null;
  try {
    Builder builder=new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0);
    qjCluster=builder.build();
    MiniDFSCluster cluster=qjCluster.getDfsCluster();
    // Pristine state: no "previous" dirs on NNs or JNs.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // NN0 upgrades while NN1 is down; JNs are upgraded along with it.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    assertCTimesEqual(cluster);
    // Roll back offline against NN0's name dirs, then verify both the NN
    // and the JNs dropped their "previous" dirs.
    // (Typed Collection<URI>; the raw type loses the element type.)
    Collection<URI> nn1NameDirs=cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf,false);
    checkNnPreviousDirExistence(cluster,0,false);
    checkJnPreviousDirExistence(qjCluster,false);
  }
  finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Make sure that even if the NN which initiated the upgrade is in the standby
* state that we're allowed to finalize.
*/
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Pristine state: no "previous" dirs on JNs or NNs.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 initiates the upgrade while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
// Fail over so the NN that did NOT initiate the upgrade is now active;
// finalization must still succeed from here.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test that automatic failover is triggered by shutting the
* active NN down.
*/
@Test(timeout=60000) public void testFailoverAndBackOnNNShutdown() throws Exception {
Path p1=new Path("/dir1");
Path p2=new Path("/dir2");
// Write via NN0 (active), then kill it; ZKFC should fail over to NN1.
fs.mkdirs(p1);
cluster.shutdownNameNode(0);
assertTrue(fs.exists(p1));
fs.mkdirs(p2);
// The fencer should have fenced the service that ZKFC-1 tracks (NN0).
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr1.zkfc.getLocalTarget().getAddress());
// Bring NN0 back; it should rejoin as standby with full namespace.
cluster.restartNameNode(0);
waitForHAState(0,HAServiceState.STANDBY);
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
// Now kill NN1; failover should go back to NN0.
cluster.shutdownNameNode(1);
waitForHAState(0,HAServiceState.ACTIVE);
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr2.zkfc.getLocalTarget().getAddress());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Test if StandbyException can be thrown from StandbyNN, when it's requested for
* password. (HDFS-6475). With StandbyException, the client can failover to try
* activeNN.
*/
@Test public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
final DelegationTokenSecretManager stSecretManager=NameNodeAdapter.getDtSecretManager(nn1.getNamesystem());
final Token token=getDelegationToken(fs,"JobTracker");
final DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
assertTrue(null != stSecretManager.retrievePassword(identifier));
final UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker");
ugi.addToken(token);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run(){
try {
try {
byte[] tmppw=dtSecretManager.retrievePassword(identifier);
fail("InvalidToken with cause StandbyException is expected" + " since nn0 is standby");
return tmppw;
}
catch ( IOException e) {
throw new SecurityException("Failed to obtain user group information: " + e,e);
}
}
catch ( Exception oe) {
HttpServletResponse response=mock(HttpServletResponse.class);
ExceptionHandler eh=new ExceptionHandler();
eh.initResponse(response);
Response resp=eh.toResponse(oe);
Map,?> m=(Map,?>)JSON.parse(resp.getEntity().toString());
RemoteException re=JsonUtil.toRemoteException(m);
Exception unwrapped=((RemoteException)re).unwrapRemoteException(StandbyException.class);
assertTrue(unwrapped instanceof StandbyException);
return null;
}
}
}
);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Test if correct exception (StandbyException or RetriableException) can be
* thrown during the NN failover.
*/
/**
 * Verify the correct exception type (StandbyException or RetriableException)
 * is thrown for token operations during an NN failover: StandbyException
 * while nn0 is fully standby, and either exception while nn1 is mid-transition.
 */
@Test public void testDelegationTokenDuringNNFailover() throws Exception {
  // Replace nn1's tailer with a controllable test tailer so we can hold the
  // NN in the "transitioning to active" state.
  EditLogTailer editLogTailer=nn1.getNamesystem().getEditLogTailer();
  editLogTailer.stop();
  Configuration conf=(Configuration)Whitebox.getInternalState(editLogTailer,"conf");
  nn1.getNamesystem().setEditLogTailerForTests(new EditLogTailerForTest(nn1.getNamesystem(),conf));
  // Typed token (the raw Token was an extraction artifact).
  final Token<DelegationTokenIdentifier> token=getDelegationToken(fs,"JobTracker");
  DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
  byte[] tokenId=token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  LOG.info("A valid token should have non-null password, " + "and should be renewed successfully");
  assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken(token,"JobTracker");
  // Once nn0 is standby, renewals against it must fail with StandbyException.
  cluster.transitionToStandby(0);
  try {
    cluster.getNameNodeRpc(0).renewDelegationToken(token);
    fail("StandbyException is expected since nn0 is in standby state");
  }
  catch ( StandbyException e) {
    GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(),e);
  }
  // Kick off nn1's transition to active in the background; the test tailer
  // blocks it until we set catchup below.
  new Thread(){
    @Override public void run(){
      try {
        cluster.transitionToActive(1);
      }
      catch ( Exception e) {
        LOG.error("Transition nn1 to active failed",e);
      }
    }
  }
  .start();
  Thread.sleep(1000);
  // While nn1 is still transitioning, token verification must fail with a
  // retriable/standby exception rather than succeeding or hanging.
  try {
    nn1.getNamesystem().verifyToken(token.decodeIdentifier(),token.getPassword());
    fail("RetriableException/StandbyException is expected since nn1 is in transition");
  }
  catch ( IOException e) {
    assertTrue(e instanceof StandbyException || e instanceof RetriableException);
    LOG.info("Got expected exception",e);
  }
  // Release the test tailer so nn1 can finish becoming active.
  catchup=true;
  synchronized (this) {
    this.notifyAll();
  }
  // After failover completes, renew and cancel must both succeed.
  Configuration clientConf=dfs.getConf();
  doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
  doRenewOrCancel(token,clientConf,TokenTestAction.CANCEL);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercise the delegation-token DFS API across a failover: retrieve/renew via
 * the secret manager, reject renewal under a conf with no nameservice mapping,
 * then renew and cancel against the new active NN.
 */
@Test public void testDelegationTokenDFSApi() throws Exception {
  // Typed token (the raw Token was an extraction artifact).
  final Token<DelegationTokenIdentifier> token=getDelegationToken(fs,"JobTracker");
  DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
  byte[] tokenId=token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  LOG.info("A valid token should have non-null password, " + "and should be renewed successfully");
  assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken(token,"JobTracker");
  Configuration clientConf=dfs.getConf();
  doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
  // An empty conf has no logical-nameservice mapping, so renewal must fail
  // with an explanatory IOException rather than silently doing nothing.
  Configuration emptyConf=new Configuration();
  try {
    doRenewOrCancel(token,emptyConf,TokenTestAction.RENEW);
    fail("Did not throw trying to renew with an empty conf!");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Unable to map logical nameservice URI",ioe);
  }
  // Fail over and verify the token still renews and cancels cleanly.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
  doRenewOrCancel(token,clientConf,TokenTestAction.CANCEL);
}
IterativeVerifier InternalCallVerifier BooleanVerifier
// Verify the standby's edit-log tailer picks up namespace changes made on the
// active NN, in two batches of DIRS_TO_MAKE / 2 directories.
@Test public void testTailer() throws IOException, InterruptedException, ServiceFailedException {
Configuration conf=new HdfsConfiguration();
// Tail every second so the standby catches up quickly.
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
// Allow reads on the standby so getFileInfo can be checked on nn2.
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
try {
// First half: create on active, wait for tailer, verify on standby.
for (int i=0; i < DIRS_TO_MAKE / 2; i++) {
NameNodeAdapter.mkdirs(nn1,getDirPath(i),new PermissionStatus("test","test",new FsPermission((short)00755)),true);
}
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
for (int i=0; i < DIRS_TO_MAKE / 2; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,getDirPath(i),false).isDir());
}
// Second half: same cycle to prove tailing keeps working over time.
for (int i=DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
NameNodeAdapter.mkdirs(nn1,getDirPath(i),new PermissionStatus("test","test",new FsPermission((short)00755)),true);
}
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
for (int i=DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,getDirPath(i),false).isDir());
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * The two NNs' block-token secret managers partition the serial-number space,
 * so setting the same base serial number on both must still yield distinct
 * effective serial numbers — checked across boundary values.
 */
@Test public void ensureSerialNumbersNeverOverlap(){
  BlockTokenSecretManager btsm1=cluster.getNamesystem(0).getBlockManager().getBlockTokenSecretManager();
  BlockTokenSecretManager btsm2=cluster.getNamesystem(1).getBlockManager().getBlockTokenSecretManager();
  // Boundary and midpoint values of the int range.
  int[] serialNumbers={0,Integer.MAX_VALUE,Integer.MIN_VALUE,Integer.MAX_VALUE / 2,Integer.MIN_VALUE / 2};
  for ( int serialNo : serialNumbers) {
    btsm1.setSerialNo(serialNo);
    btsm2.setSerialNo(serialNo);
    assertFalse(btsm1.getSerialNoForTesting() == btsm2.getSerialNoForTesting());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test that marking the shared edits dir as being "required" causes the NN to
* fail if that dir can't be accessed.
*/
@Test public void testFailureOfSharedDir() throws Exception {
Configuration conf=new Configuration();
// Short resource-check interval so the NN notices the failure quickly.
conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
MiniDFSCluster cluster=null;
File sharedEditsDir=null;
try {
// checkExitOnShutdown(false): we expect the NN to call System.exit when
// the required journal fails, and want the test to survive that.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/test1")));
// Make the shared edits dir unwritable (recursively).
URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
sharedEditsDir=new File(sharedEditsUri);
assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
// Wait two check intervals so the resource checker observes the failure.
Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
NameNode nn1=cluster.getNameNode(1);
assertTrue(nn1.isStandbyState());
assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
// The active NN must abort (ExitException) when it cannot finalize a
// segment on the required shared journal.
NameNode nn0=cluster.getNameNode(0);
try {
nn0.getRpcServer().rollEditLog();
fail("Succeeded in rolling edit log despite shared dir being deleted");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
}
// The local edits dirs must still hold only the original in-progress
// segment — nothing was finalized after the shared-dir failure.
for ( URI editsUri : cluster.getNameEditsDirs(0)) {
if (editsUri.equals(sharedEditsUri)) {
continue;
}
File editsDir=new File(editsUri.getPath());
File curDir=new File(editsDir,"current");
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
}
}
finally {
// Restore write permission so cleanup can delete the directory.
if (sharedEditsDir != null) {
FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
* Test that the shared edits dir is automatically added to the list of edits
* dirs that are marked required.
*/
/**
 * The shared edits dir must be automatically added to the set of required
 * edits dirs even when it is absent from the explicit "required" key.
 */
@Test public void testSharedDirIsAutomaticallyMarkedRequired() throws URISyntaxException {
  URI foo=new URI("file:/foo");
  URI bar=new URI("file:/bar");
  Configuration conf=new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,Joiner.on(",").join(foo,bar));
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,foo.toString());
  // bar is not explicitly required, so it starts out optional.
  assertFalse(FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(bar));
  // Marking bar as the shared edits dir must implicitly make it required.
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,bar.toString());
  // Typed Collection<URI>; the raw type loses the element type.
  Collection<URI> requiredEditsDirs=FSNamesystem.getRequiredNamespaceEditsDirs(conf);
  assertTrue(Joiner.on(",").join(requiredEditsDirs) + " does not contain " + bar,requiredEditsDirs.contains(bar));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Ensure that the standby fails to become active if it cannot read all
 * available edits in the shared edits dir when it is transitioning to active
 * state.
 */
@Test public void testFailureToReadEditsOnTransitionToActive() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
// Let the standby tail the first edits and checkpoint (image txids 0 and 3).
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// From here on, the standby's reads of the shared edit log will fail.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// Expected: the standby cannot catch up while edit log reads fail.
}
// Kill the active, then try to fail over to the lagging standby; the
// transition must abort with an edit-log replay error.
cluster.shutdownNameNode(0);
try {
cluster.transitionToActive(1);
fail("Standby transitioned to active, but should not have been able to");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("Error replaying edit log",ee);
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that the standby NN won't double-replay earlier edits if it encounters
 * a failure to read a later edit.
 */
@Test public void testFailuretoReadEdits() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Generate more edits: an owner change, a delete, and two mkdirs.
fs.setOwner(new Path(TEST_DIR1),"foo","bar");
assertTrue(fs.delete(new Path(TEST_DIR1),true));
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
// Make the standby fail partway through reading the edit stream.
LimitedEditLogAnswer answer=causeFailureOnEditLogRead();
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// Expected while edit reads are failing.
}
// Only a prefix of the edits was applied on nn1: TEST_DIR1 was deleted and
// TEST_DIR2 created, but the TEST_DIR3 edit was not yet replayed.
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false));
// Stop injecting failures; the standby should now finish replay without
// double-applying the earlier edits.
answer.setThrowExceptionOnRead(false);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false).isDir());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the following case:
 * 1. SBN is reading a finalized edits file when NFS disappears halfway
 * through (or some intermittent error happens)
 * 2. SBN performs a checkpoint and uploads it to the NN
 * 3. NN receives a checkpoint that doesn't correspond to the end of any log
 * segment
 * 4. Both NN and SBN should be able to restart at this point.
 * This is a regression test for HDFS-2766.
 */
@Test public void testCheckpointStartingMidEditsFile() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Both NNs should have checkpoint images at txids 0 and 3.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// Simulate NFS disappearing mid-read of the edits file.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// Expected while edit log reads are failing.
}
// The SBN should still checkpoint (txid 5, mid-segment) and upload it.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3,5));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// The NN must be able to restart from that mid-segment checkpoint.
cluster.restartNameNode(0);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
FileSystem fs0=null;
try {
fs0=FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),conf);
// All three directories must be visible after the restart.
assertTrue(fs0.exists(new Path(TEST_DIR1)));
assertTrue(fs0.exists(new Path(TEST_DIR2)));
assertTrue(fs0.exists(new Path(TEST_DIR3)));
}
finally {
if (fs0 != null) fs0.close();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify the HA-related metrics (HA state, millis-since-last-loaded-edits,
 * pending datanode message count) as the cluster moves through standby/active
 * transitions and a failover.
 */
@Test public void testHAMetrics() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,Integer.MAX_VALUE);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FileSystem fs=null;
try {
cluster.waitActive();
FSNamesystem nn0=cluster.getNamesystem(0);
FSNamesystem nn1=cluster.getNamesystem(1);
// Both NNs start out standby. (Fixed: the expected value goes first in
// assertEquals, consistent with the assertions below.)
assertEquals("standby",nn0.getHAState());
assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
assertEquals("standby",nn1.getHAState());
assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
cluster.transitionToActive(0);
// The active NN always reports 0 ms since last loaded edits.
assertEquals("active",nn0.getHAState());
assertEquals(0,nn0.getMillisSinceLastLoadedEdits());
assertEquals("standby",nn1.getHAState());
assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
// Fail over from NN0 to NN1 and re-check the metrics from each side.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertEquals("standby",nn0.getHAState());
assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
assertEquals("active",nn1.getHAState());
assertEquals(0,nn1.getMillisSinceLastLoadedEdits());
// After sleeping, the standby's millis-since-last-loaded-edits must grow.
Thread.sleep(2000);
assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
assertEquals(0,nn0.getPendingDataNodeMessageCount());
assertEquals(0,nn1.getPendingDataNodeMessageCount());
fs=HATestUtil.configureFailoverFs(cluster,conf);
// Creating a file queues pending DN messages on the standby (nn0).
DFSTestUtil.createFile(fs,new Path("/foo"),10,(short)1,1L);
assertTrue(0 < nn0.getPendingDataNodeMessageCount());
assertEquals(0,nn1.getPendingDataNodeMessageCount());
long millisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
// Tailing the edits drains the pending messages and resets the timer.
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1),cluster.getNameNode(0));
assertEquals(0,nn0.getPendingDataNodeMessageCount());
assertEquals(0,nn1.getPendingDataNodeMessageCount());
long newMillisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
assertTrue("expected " + millisSinceLastLoadedEdits + " > "+ newMillisSinceLastLoadedEdits,millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
}
finally {
IOUtils.cleanup(LOG,fs);
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Similar to {@link #testBlocksRemovedWhileInSafeMode()} except that
 * the OP_DELETE edits arrive at the SBN before the block deletion reports.
 * The tracking of safe blocks needs to properly account for the removal
 * of the blocks as well as the safe count. This is a regression test for
 * HDFS-2742.
 */
@Test public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception {
banner("Starting with NN0 active and NN1 standby, creating some blocks");
DFSTestUtil.createFile(fs,new Path("/test"),10 * BLOCK_SIZE,(short)3,1L);
nn0.getRpcServer().rollEditLog();
banner("Restarting standby");
restartStandby();
// The restarted SBN should sit in startup safemode with all 10 blocks safe.
String status=nn1.getNamesystem().getSafemode();
assertTrue("Bad safemode status: '" + status + "'",status.startsWith("Safe mode is ON. The reported blocks 10 has reached the threshold " + "0.9990 of total blocks 10. The number of live datanodes 3 has " + "reached the minimum number 0. In safe mode extension. "+ "Safe mode will be turned off automatically"));
banner("Removing the blocks without rolling the edit log");
// Delete so the OP_DELETE edits reach the SBN before any deletion reports.
fs.delete(new Path("/test"),true);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
assertSafeMode(nn1,0,0,3,0);
banner("Triggering sending deletions to DNs and Deletion Reports");
BlockManagerTestUtil.computeAllPendingWork(nn0.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// The late deletion reports must not corrupt the safe-block accounting.
assertSafeMode(nn1,0,0,3,0);
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for enter safemode in standby namenode, when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test public void testEnterSafeModeInSBNShouldNotThrowNPE() throws Exception {
banner("Starting with NN0 active and NN1 standby, creating some blocks");
DFSTestUtil.createFile(fs,new Path("/test"),3 * BLOCK_SIZE,(short)3,1L);
// Roll so /test lands in a finalized segment; the later edits do not.
nn0.getRpcServer().rollEditLog();
banner("Creating some blocks that won't be in the edit log");
DFSTestUtil.createFile(fs,new Path("/test2"),5 * BLOCK_SIZE,(short)3,1L);
banner("Deleting the original blocks");
fs.delete(new Path("/test"),true);
banner("Restarting standby");
restartStandby();
// The SBN comes back up in startup safemode...
FSNamesystem namesystem=nn1.getNamesystem();
String status=namesystem.getSafemode();
assertTrue("Bad safemode status: '" + status + "'",status.startsWith("Safe mode is ON."));
// ...and manually entering safemode (twice) must not NPE and must stick.
NameNodeAdapter.enterSafeMode(nn1,false);
assertTrue("Failed to enter into safemode in standby",namesystem.isInSafeMode());
NameNodeAdapter.enterSafeMode(nn1,false);
assertTrue("Failed to enter into safemode in standby",namesystem.isInSafeMode());
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * DFS#isInSafeMode should check the ActiveNNs safemode in HA enabled cluster. HDFS-3507
 * @throws Exception
 */
@Test public void testIsInSafemode() throws Exception {
// nn2 (index 1) starts out as the standby.
NameNode nn2=cluster.getNameNode(1);
assertTrue("nn2 should be in standby state",nn2.isStandbyState());
InetSocketAddress nameNodeAddress=nn2.getNameNodeAddress();
Configuration conf=new Configuration();
DistributedFileSystem dfs=new DistributedFileSystem();
try {
// Talk to the standby NN directly (no failover proxy); isInSafeMode must
// be rejected with a StandbyException wrapped in a RemoteException.
dfs.initialize(URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"+ nameNodeAddress.getPort()),conf);
dfs.isInSafeMode();
fail("StandBy should throw exception for isInSafeMode");
}
catch ( IOException e) {
if (e instanceof RemoteException) {
// Fixed typo in the local variable name (was "sbExcpetion").
IOException sbException=((RemoteException)e).unwrapRemoteException();
assertTrue("StandBy nn should not support isInSafeMode",sbException instanceof StandbyException);
}
else {
throw e;
}
}
finally {
if (null != dfs) {
dfs.close();
}
}
// Fail over so NN1 becomes active, then verify that a failover-aware client
// reports the ACTIVE NN's safemode state.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
DistributedFileSystem dfsWithFailOver=(DistributedFileSystem)fs;
assertTrue("ANN should be in SafeMode",dfsWithFailOver.isInSafeMode());
cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
assertFalse("ANN should be out of SafeMode",dfsWithFailOver.isInSafeMode());
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for enter safemode in active namenode, when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
banner("Restarting active");
DFSTestUtil.createFile(fs,new Path("/test"),3 * BLOCK_SIZE,(short)3,1L);
restartActive();
nn0.getRpcServer().transitionToActive(new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
// The restarted NN is in startup safemode...
FSNamesystem namesystem=nn0.getNamesystem();
String status=namesystem.getSafemode();
assertTrue("Bad safemode status: '" + status + "'",status.startsWith("Safe mode is ON."));
// ...and manually entering safemode (twice) must not NPE and must stick.
NameNodeAdapter.enterSafeMode(nn0,false);
assertTrue("Failed to enter into safemode in active",namesystem.isInSafeMode());
NameNodeAdapter.enterSafeMode(nn0,false);
assertTrue("Failed to enter into safemode in active",namesystem.isInSafeMode());
}
InternalCallVerifier BooleanVerifier
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test(timeout=300000) public void testClientRetrySafeMode() throws Exception {
// Parameterized types (not raw Map/HashMap) so results.get() yields a
// Boolean that auto-unboxes in the assertTrue below.
final Map<Path,Boolean> results=Collections.synchronizedMap(new HashMap<Path,Boolean>());
final Path test=new Path("/test");
// Force the NN into safemode and stretch the extension so it stays there.
NameNodeAdapter.enterSafeMode(nn0,false);
SafeModeInfo safeMode=(SafeModeInfo)Whitebox.getInternalState(nn0.getNamesystem(),"safeMode");
Whitebox.setInternalState(safeMode,"extension",Integer.valueOf(30000));
LOG.info("enter safemode");
// Issue the mkdir from a second thread; the client should block/retry
// until the NN leaves safemode, then report its result via `results`.
new Thread(){
@Override public void run(){
try {
boolean mkdir=fs.mkdirs(test);
LOG.info("mkdir finished, result is " + mkdir);
synchronized (TestHASafeMode.this) {
results.put(test,mkdir);
TestHASafeMode.this.notifyAll();
}
}
catch ( Exception e) {
LOG.info("Got Exception while calling mkdir",e);
}
}
}
.start();
// While the NN is in safemode the mkdir must not have gone through.
assertFalse("The directory should not be created while NN in safemode",fs.exists(test));
Thread.sleep(1000);
NameNodeAdapter.leaveSafeMode(nn0);
LOG.info("leave safemode");
// Wait for the background mkdir to complete and verify it succeeded.
synchronized (this) {
while (!results.containsKey(test)) {
this.wait();
}
assertTrue(results.get(test));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that when we transition to active in safe mode that we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet.
 * This is a regression test for HDFS-3921.
 */
@Test public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode() throws IOException {
// 15 blocks at replication 3 across the cluster's datanodes.
DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
// Take one DN down and restart NN0 without waiting for it to leave safemode.
cluster.stopDataNode(1);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
// Still in safemode, so no blocks may be counted as missing yet.
assertTrue(cluster.getNameNode(0).isInSafeMode());
assertEquals(0,cluster.getNamesystem(0).getMissingBlocksCount());
}
InternalCallVerifier BooleanVerifier
/**
 * Test NN crash and client crash/stuck immediately after block allocation
 */
@Test(timeout=100000) public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
// Require 100% of blocks to be reported before leaving safemode on restart.
cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"1.0f");
String testData="testData";
cluster.getConfiguration(0).setInt("io.bytes.per.checksum",testData.length());
cluster.restartNameNode(0);
try {
cluster.waitActive();
cluster.transitionToActive(0);
cluster.transitionToStandby(1);
DistributedFileSystem dfs=cluster.getFileSystem(0);
String pathString="/tmp1.txt";
Path filePath=new Path(pathString);
// Write one checksum chunk worth of data and flush it to the pipeline.
FSDataOutputStream create=dfs.create(filePath,FsPermission.getDefault(),true,1024,(short)3,testData.length(),null);
create.write(testData.getBytes());
create.hflush();
long fileId=((DFSOutputStream)create.getWrappedStream()).getFileId();
// NOTE(review): fileStatus is unused — kept as-is.
FileStatus fileStatus=dfs.getFileStatus(filePath);
DFSClient client=DFSClientAdapter.getClient(dfs);
// Simulate the client dying right after allocating a new block: call
// addBlock directly and never write to (or close) the stream.
ExtendedBlock previousBlock=DFSClientAdapter.getPreviousBlock(client,fileId);
DFSClientAdapter.getNamenode(client).addBlock(pathString,client.getClientName(),new ExtendedBlock(previousBlock),new DatanodeInfo[0],DFSClientAdapter.getFileId((DFSOutputStream)create.getWrappedStream()),null);
// Crash/restart the NN and DN, then verify the file is still readable and
// its lease recoverable.
cluster.restartNameNode(0,true);
cluster.restartDataNode(0);
cluster.transitionToActive(0);
Thread.sleep(2000);
FSDataInputStream is=dfs.open(filePath);
is.close();
dfs.recoverLease(filePath);
assertTrue("Recovery also should be success",dfs.recoverLease(filePath));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test that delegation tokens continue to work after the failover.
 */
@Test public void testDelegationTokensAfterFailover() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
// Obtain a token from the current active (nn1).
String renewer=UserGroupInformation.getLoginUser().getUserName();
Token token=nn1.getRpcServer().getDelegationToken(new Text(renewer));
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// The new active must honor renew/cancel of the pre-failover token and
// must be able to issue a fresh one.
nn2.getRpcServer().renewDelegationToken(token);
nn2.getRpcServer().cancelDelegationToken(token);
token=nn2.getRpcServer().getDelegationToken(new Text(renewer));
// assertNotNull gives a clearer failure message than assertTrue(x != null).
Assert.assertNotNull(token);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for HDFS-2812. Since lease renewals go from the client
 * only to the active NN, the SBN will have out-of-date lease
 * info when it becomes active. We need to make sure we don't
 * accidentally mark the leases as expired when the failover
 * proceeds.
 */
@Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FSDataOutputStream stm=null;
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
try {
cluster.waitActive();
cluster.transitionToActive(0);
LOG.info("Starting with NN 0 active");
// Creating the file establishes a lease on nn0 but not yet on nn1.
stm=fs.create(TEST_FILE_PATH);
long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
assertTrue(nn0t0 > 0);
long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
// Sleep so wall-clock renewal timestamps are strictly comparable below.
Thread.sleep(5);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
Thread.sleep(5);
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Becoming active must freshen the lease rather than let it look expired.
long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test also serves to test
 * {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration, String)} and
 * {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration, String, String)}
 * by virtue of the fact that it wouldn't work properly if the proxies
 * returned were not for the correct NNs.
 */
@Test public void testIsAtLeastOneActive() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new HdfsConfiguration()).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
Configuration conf=new HdfsConfiguration();
HATestUtil.setFailoverConfigurations(cluster,conf);
// NOTE(review): raw List — presumably a list of NN proxies; left as-is.
List namenodes=HAUtil.getProxiesForAllNameNodesInNameservice(conf,HATestUtil.getLogicalHostname(cluster));
assertEquals(2,namenodes.size());
// Walk through the active/standby combinations and check the predicate.
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(0);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(0);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(1);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(1);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test which takes a single node and flip flops between
 * active and standby mode, making sure it doesn't
 * double-play any edits.
 */
@Test public void testTransitionActiveToStandby() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=cluster.getFileSystem(0);
fs.mkdirs(TEST_DIR);
// While standby, any mutation must be rejected.
cluster.transitionToStandby(0);
try {
fs.mkdirs(new Path("/x"));
fail("Didn't throw trying to mutate FS in standby state");
}
catch ( Throwable t) {
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
}
// Back to active: write a file, then delete the whole dir...
cluster.transitionToActive(0);
DFSTestUtil.createFile(fs,new Path(TEST_DIR,"foo"),10,(short)1,1L);
fs.delete(TEST_DIR,true);
// ...and flip standby->active once more; the delete must survive the
// round trip (no edits double-played or lost).
cluster.transitionToStandby(0);
cluster.transitionToActive(0);
assertFalse(fs.exists(TEST_DIR));
}
finally {
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * The secret manager needs to start/stop - the invariant should be that
 * the secret manager runs if and only if the NN is active and not in
 * safe mode. As a state diagram, we need to test all of the following
 * transitions to make sure the secret manager is started when we transition
 * into state 4, but none of the others.
 *
 * SafeMode Not SafeMode
 * Standby 1 <------> 2
 * ^ ^
 * | |
 * v v
 * Active 3 <------> 4
 *
 */
@Test(timeout=60000) public void testSecretManagerState() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,50);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
// waitSafeMode(false) lets the test drive safemode transitions manually.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).waitSafeMode(false).build();
try {
cluster.transitionToActive(0);
DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,6000,(short)1,1L);
// A long safemode extension keeps the restarted NN in safemode (state 1).
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,60000);
cluster.restartNameNode(0);
NameNode nn=cluster.getNameNode(0);
banner("Started in state 1.");
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->2. Should not start secret manager");
NameNodeAdapter.leaveSafeMode(nn);
assertTrue(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 2->1. Should not start secret manager.");
NameNodeAdapter.enterSafeMode(nn,false);
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->3. Should not start secret manager.");
nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 3->1. Should not start secret manager.");
nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->3->4. Should start secret manager.");
nn.getRpcServer().transitionToActive(REQ_INFO);
NameNodeAdapter.leaveSafeMode(nn);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
banner("Transition 4->3. Should stop secret manager");
NameNodeAdapter.enterSafeMode(nn,false);
assertFalse(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 3->4. Should start secret manager");
NameNodeAdapter.leaveSafeMode(nn);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
// Repeatedly bounce between states 4 and 2 to make sure the secret
// manager reliably stops and restarts.
for (int i=0; i < 20; i++) {
banner("Transition 4->2. Should stop secret manager.");
nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 2->4. Should start secret manager");
nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
@Test public void testInitializeSharedEdits() throws Exception {
// NNs cannot start before the shared edits dir has been initialized.
assertCannotStartNameNodes();
// A false return is the success case here (see assertCanStartHaNameNodes
// immediately after) — presumably "true" signals an aborted initialization.
assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
assertCanStartHaNameNodes("1");
// Repeat the whole cycle after wiping the shared edits dir.
shutdownClusterAndRemoveSharedEditsDir();
assertCannotStartNameNodes();
assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
assertCanStartHaNameNodes("2");
}
BooleanVerifier
@Test public void testDontOverWriteExistingDir() throws IOException {
// First initialization succeeds (false = no abort)...
assertFalse(NameNode.initializeSharedEdits(conf,false));
// ...but a second attempt without force must refuse to overwrite the
// already-initialized dir (true return).
assertTrue(NameNode.initializeSharedEdits(conf,false));
}
InternalCallVerifier BooleanVerifier
@Test public void testFailWhenNoSharedEditsSpecified() throws Exception {
// Clone the test configuration, then strip out the shared-edits dir key so
// initialization has no shared edits location to work with.
Configuration noSharedEditsConf=new Configuration(conf);
noSharedEditsConf.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
// initializeSharedEdits is expected to return false for this configuration.
assertFalse(NameNode.initializeSharedEdits(noSharedEditsConf,true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testChangedStorageId() throws IOException, URISyntaxException, InterruptedException {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).nnTopology(MiniDFSNNTopology.simpleHATopology()).build();
try {
cluster.transitionToActive(0);
// Write a file and let the standby catch up on its edits.
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
OutputStream out=fs.create(filePath);
out.write("foo bar baz".getBytes());
out.close();
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),cluster.getNameNode(1));
// Change the block's gen stamp behind the NN's back, so the restarted
// DN's report queues exactly one pending DN message on NN1.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,filePath);
assertTrue(MiniDFSCluster.changeGenStampOfBlock(0,block,900));
DataNodeProperties dnProps=cluster.stopDataNode(0);
cluster.restartNameNode(1,false);
assertTrue(cluster.restartDataNode(dnProps,true));
while (cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount() < 1) {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
}
assertEquals(1,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
String oldStorageId=getRegisteredDatanodeUid(cluster,1);
// Wipe the DN so it re-registers under a brand-new storage ID, and poll
// until NN1 sees the change.
assertTrue(wipeAndRestartDn(cluster,0));
String newStorageId="";
do {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
newStorageId=getRegisteredDatanodeUid(cluster,1);
System.out.println("====> oldStorageId: " + oldStorageId + " newStorageId: "+ newStorageId);
}
while (newStorageId.equals(oldStorageId));
// The message queued for the old storage ID must have been purged.
assertEquals(0,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the scenario where the NN fails over after issuing a block
 * synchronization request, but before it is committed. The
 * DN running the recovery should then fail to commit the synchronization
 * and a later retry will succeed.
 */
@Test(timeout=30000) public void testFailoverRightBeforeCommitSynchronization() throws Exception {
final Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// Write half a block and leave the file open (lease still held).
stm=fs.create(TEST_PATH);
AppendTestUtil.write(stm,0,BLOCK_SIZE / 2);
stm.hflush();
NameNode nn0=cluster.getNameNode(0);
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
DatanodeDescriptor expectedPrimary=DFSTestUtil.getExpectedPrimaryNode(nn0,blk);
LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
// Spy on the primary DN's RPC translator so we can delay its
// commitBlockSynchronization call to NN0.
DataNode primaryDN=cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(primaryDN,nn0);
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(Mockito.eq(blk),Mockito.anyInt(),Mockito.anyLong(),Mockito.eq(true),Mockito.eq(false),(DatanodeID[])Mockito.anyObject(),(String[])Mockito.anyObject());
DistributedFileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
// Kick off lease recovery; it cannot complete on the first call.
assertFalse(fsOtherUser.recoverLease(TEST_PATH));
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Release the delayed RPC; it now hits a standby NN and must fail.
delayer.proceed();
delayer.waitForResult();
Throwable t=delayer.getThrown();
if (t == null) {
fail("commitBlockSynchronization call did not fail on standby");
}
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
// A retried lease recovery against the new active should succeed.
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests lease recovery if a client crashes. This approximates the
 * use case of HBase WALs being recovered after a NN failover.
 */
@Test(timeout=30000) public void testLeaseRecoveryAfterFailover() throws Exception {
final Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// Write a block and a half, hflush, and leave the file open — the
// "crashed client" whose lease must later be recovered.
stm=fs.create(TEST_PATH);
AppendTestUtil.write(stm,0,BLOCK_AND_A_HALF);
stm.hflush();
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.exists(TEST_PATH));
// Recover the lease as a different user against the new active and make
// sure the hflushed data is all there.
FileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_AND_A_HALF);
// Fail back to NN0 and verify the data is still intact there as well.
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_AND_A_HALF);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * 1. Run a set of operations
 * 2. Trigger the NN failover
 * 3. Check the retry cache on the original standby NN
 */
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception {
// Run 23 operations that populate the active NN's retry cache.
DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0);
FSNamesystem fsn0=cluster.getNamesystem(0);
// Use parameterized types rather than raw LightWeightCache/HashMap/Iterator.
LightWeightCache<CacheEntry,CacheEntry> cacheSet=(LightWeightCache<CacheEntry,CacheEntry>)fsn0.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Snapshot the entries currently cached on the active NN.
Map<CacheEntry,CacheEntry> oldEntries=new HashMap<CacheEntry,CacheEntry>();
Iterator<CacheEntry> iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Roll the log on NN0 and have NN1 tail it so the cached operations are
// replayed there, then fail over to NN1.
cluster.getNameNode(0).getRpcServer().rollEditLog();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
FSNamesystem fsn1=cluster.getNamesystem(1);
cacheSet=(LightWeightCache<CacheEntry,CacheEntry>)fsn1.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every entry from the old active must be present on the new active.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testReadsAllowedDuringCheckpoint() throws Exception {
// Spy on the SBN's FSImage so we can stall saveNamespace mid-checkpoint.
FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1);
DelayAnswer answerer=new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.any(NameNodeFile.class),Mockito.any(Canceler.class));
doEdits(0,1000);
nn0.getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// Issue a write RPC against the SBN from another thread; it should block
// behind the checkpointer rather than complete.
Thread t=new Thread(){
@Override public void run(){
try {
nn1.getRpcServer().restoreFailedStorage("false");
}
catch ( IOException e) {
e.printStackTrace();
}
}
}
;
t.start();
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// The writer must be queued on the long read lock, not the write lock.
assertFalse(nn1.getNamesystem().getFsLockForTests().hasQueuedThreads());
assertFalse(nn1.getNamesystem().getFsLockForTests().isWriteLocked());
assertTrue(nn1.getNamesystem().getLongReadLockForTests().hasQueuedThreads());
// Reads (here: the JMX servlet) must still be served during the checkpoint.
String pageContents=DFSTestUtil.urlGet(new URL("http://" + nn1.getHttpAddress().getHostName() + ":"+ nn1.getHttpAddress().getPort()+ "/jmx"));
assertTrue(pageContents.contains("NumLiveDataNodes"));
assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
// Unblock saveNamespace and let the checkpoint finish.
answerer.proceed();
answerer.waitForResult();
assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
t.join();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000) public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
// Spy on the SBN's FSImage so we can stall saveNamespace mid-checkpoint.
FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1);
DelayAnswer answerer=new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.eq(NameNodeFile.IMAGE),Mockito.any(Canceler.class));
doEdits(0,1000);
nn0.getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// Even with the checkpointer holding the FSNS lock, a client RPC must get
// a prompt StandbyException instead of hanging.
try {
nn1.getRpcServer().getFileInfo("/");
fail("Should have thrown StandbyException, but instead succeeded.");
}
catch ( StandbyException se) {
GenericTestUtils.assertExceptionContains("is not supported",se);
}
assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
// Unblock saveNamespace and let the checkpoint finish.
answerer.proceed();
answerer.waitForResult();
assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
BooleanVerifier
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint: bounce the active role between the two NNs until
 * StandbyCheckpointer reports at least one canceled checkpoint.
 */
@Test(timeout=120000) public void testCheckpointCancellation() throws Exception {
  cluster.transitionToStandby(0);
  // Plant an aborted in-progress edit segment in the shared dir so the
  // restarted NN has a large batch of edits to work through.
  URI sharedUri = cluster.getSharedEditsDir(0, 1);
  File sharedDir = new File(sharedUri.getPath(), "current");
  File tmpDir = new File(MiniDFSCluster.getBaseDirectory(), "testCheckpointCancellation-tmp");
  FSNamesystem fsn = cluster.getNamesystem(0);
  FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3, fsn.getLastInodeId() + 1);
  String fname = NNStorage.getInProgressEditsFileName(3);
  new File(tmpDir, fname).renameTo(new File(sharedDir, fname));
  // Checkpoint as aggressively as possible on nn1 (period = 0).
  cluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);
  cluster.transitionToActive(0);
  // Flip-flop the active role until some in-flight checkpoint is canceled.
  boolean sawCancellation = false;
  for (int attempt = 0; attempt < 10; attempt++) {
    doEdits(attempt * 10, attempt * 10 + 10);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    if (StandbyCheckpointer.getCanceledCount() > 0) {
      sawCancellation = true;
      break;
    }
  }
  assertTrue(sawCancellation);
}
APIUtilityVerifier BooleanVerifier
/**
 * Test NN checkpoint and transaction-related metrics: the transaction
 * gauges advance with each namespace operation, TransactionsSinceLastLogRoll
 * resets on an edit-log roll, and LastCheckpointTime moves forward after a
 * saveNamespace.
 */
@Test public void testTransactionAndCheckpointMetrics() throws Exception {
  long lastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime", getMetrics(NS_METRICS));
  // Initial state: one transaction written, none checkpointed or rolled away.
  assertTxnGauges(lastCkptTime, 1L, 1L, 1L);
  // A mkdir bumps every transaction counter by one.
  fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
  assertTxnGauges(lastCkptTime, 2L, 2L, 2L);
  // Rolling the edit log advances the written txid to 4 and resets the
  // per-roll counter to 1.
  cluster.getNameNodeRpc().rollEditLog();
  assertTxnGauges(lastCkptTime, 4L, 4L, 1L);
  // saveNamespace (inside safe mode) produces a fresh checkpoint.
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNodeRpc().saveNamespace();
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime", getMetrics(NS_METRICS));
  assertTrue(lastCkptTime < newLastCkptTime);
  assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}

/** Asserts LastCheckpointTime plus the three transaction gauges in one call. */
private void assertTxnGauges(long ckptTime, long lastWritten, long sinceCkpt, long sinceRoll) throws Exception {
  assertGauge("LastCheckpointTime", ckptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", lastWritten, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", sinceCkpt, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", sinceRoll, getMetrics(NS_METRICS));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test taking snapshots while a file is open for append (i.e. before the
 * corresponding {@link FSDataOutputStream} is closed): each snapshot must
 * freeze the file length that was made visible (via hsync with
 * UPDATE_LENGTH) at the time the snapshot was taken.
 */
@Test(timeout=60000) public void testSnapshotWhileAppending() throws Exception {
Path file=new Path(dir,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
// First append: keep the stream open, sync the length, then snapshot s0.
HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
out.close();
// After closing, the current file covers both blocks.
INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString());
assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize());
INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory();
// 'last' is the latest directory diff at this point (recorded for s0).
DirectoryDiff last=dirNode.getDiffs().getLast();
// Second in-flight append: the size seen through that diff stays 2 blocks.
out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
dirNode=fsdir.getINode(dir.toString()).asDirectory();
assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize(last.getSnapshotId()));
hdfs.createSnapshot(dir,"s1");
out.close();
// s1 froze the file at 3 blocks; the inode is now snapshot-tracked.
fileNode=(INodeFile)fsdir.getINode(file.toString());
dirNode=fsdir.getINode(dir.toString()).asDirectory();
last=dirNode.getDiffs().getLast();
assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId()));
// A replication change plus another append/snapshot must not disturb the
// size recorded for the earlier diff.
hdfs.setReplication(file,(short)(REPLICATION - 1));
out=appendFileWithoutClosing(file,BLOCKSIZE);
hdfs.createSnapshot(dir,"s2");
out.close();
assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Call DFSClient#callGetBlockLocations(...) on snapshot paths and make sure
 * only blocks within the length recorded by the snapshot are returned, even
 * while the live file keeps growing (including an in-flight append).
 */
@Test public void testGetBlockLocations() throws Exception {
final Path root=new Path("/");
final Path file=new Path("/file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
final Path fileInSnapshot=SnapshotTestHelper.getSnapshotPath(root,"s1",file.getName());
FileStatus status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
// Appending to the live file must not change the length seen through s1.
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE - 1);
status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
// Block locations for the snapshot path are limited to the snapshot length:
// one block, starting at 0.
LocatedBlocks blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot.toString(),0,Long.MAX_VALUE);
List blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE,blocks.getFileLength());
assertEquals(1,blockList.size());
LocatedBlock lastBlock=blocks.getLastLocatedBlock();
assertEquals(0,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
// Take s2, then start another append without closing the stream.
SnapshotTestHelper.createSnapshot(hdfs,root,"s2");
final Path fileInSnapshot2=SnapshotTestHelper.getSnapshotPath(root,"s2",file.getName());
HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
status=hdfs.getFileStatus(fileInSnapshot2);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 3 - 1,status.getLen());
// Through s2 the file appears complete (not under construction) at the
// length frozen when s2 was taken.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),0,Long.MAX_VALUE);
assertFalse(blocks.isUnderConstruction());
assertTrue(blocks.isLastBlockComplete());
blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE * 2 - 1,blocks.getFileLength());
assertEquals(2,blockList.size());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
// A zero-length range query at offset BLOCKSIZE returns just the block
// containing that offset.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),BLOCKSIZE,0);
blockList=blocks.getLocatedBlocks();
assertEquals(1,blockList.size());
// The live file, by contrast, reports 3 blocks and is under construction
// with an incomplete last block.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),file.toString(),0,Long.MAX_VALUE);
blockList=blocks.getLocatedBlocks();
assertEquals(3,blockList.size());
assertTrue(blocks.isUnderConstruction());
assertFalse(blocks.isLastBlockComplete());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE * 2,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE - 1,lastBlock.getBlockSize());
out.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Snapshots under a namespace quota: a default-named snapshot is created
 * with the expected name pattern and location, and once the quota is
 * exhausted, file creation, snapshot creation, and setPermission on a file
 * captured by an earlier snapshot all fail with NSQuotaExceededException.
 */
@Test(timeout=300000) public void testSnapshotWithQuota() throws Exception {
final String dirStr="/testSnapshotWithQuota/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
final int NS_QUOTA=6;
hdfs.setQuota(dir,NS_QUOTA,HdfsConstants.QUOTA_DONT_SET);
final Path foo=new Path(dir,"foo");
final Path f1=new Path(foo,"f1");
DFSTestUtil.createFile(hdfs,f1,BLOCKSIZE,REPLICATION,SEED);
{
// A snapshot created without an explicit name gets a timestamp-style
// default name (s<yyyyMMdd>-<HHmmss>.<SSS>) and lives under <dir>/.snapshot.
final Path snapshotPath=hdfs.createSnapshot(dir);
final String snapshotName=snapshotPath.getName();
Assert.assertTrue("snapshotName=" + snapshotName,Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",snapshotName));
final Path parent=snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR,parent.getName());
Assert.assertEquals(dir,parent.getParent());
}
final Path f2=new Path(foo,"f2");
DFSTestUtil.createFile(hdfs,f2,BLOCKSIZE,REPLICATION,SEED);
// The namespace quota is now exhausted: creating another file must fail.
try {
final Path f3=new Path(foo,"f3");
DFSTestUtil.createFile(hdfs,f3,BLOCKSIZE,REPLICATION,SEED);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// Creating a snapshot also fails once the quota is exhausted.
try {
hdfs.createSnapshot(dir);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// setPermission on f1 (which is captured in the earlier snapshot) fails;
// over RPC the quota error surfaces wrapped in a RemoteException.
try {
hdfs.setPermission(f1,new FsPermission((short)0));
Assert.fail();
}
catch ( RemoteException e) {
Assert.assertSame(NSQuotaExceededException.class,e.unwrapRemoteException().getClass());
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// f2 was created after that snapshot; its setPermission succeeds.
hdfs.setPermission(f2,new FsPermission((short)0));
// After raising the quota, snapshot creation succeeds again.
hdfs.setQuota(dir,NS_QUOTA + 2,HdfsConstants.QUOTA_DONT_SET);
hdfs.createSnapshot(dir,"s1");
hdfs.setPermission(foo,new FsPermission((short)0444));
}
InternalCallVerifier BooleanVerifier
/**
 * When nested snapshottable directories are enabled and a snapshottable
 * descendant is reset back to a regular directory, the directory must still
 * carry its snapshot feature (isWithSnapshot) afterwards, so existing
 * snapshot data stays intact.
 */
@Test public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  // Creating the file after s1 leaves "sub" carrying snapshot diffs.
  INode inode = fsdir.getINode(sub.toString());
  assertTrue(inode.asDirectory().isWithSnapshot());
  // Make "sub" snapshottable, then disallow again: the round trip must
  // leave it a plain directory that still has the snapshot feature.
  hdfs.allowSnapshot(sub);
  inode = fsdir.getINode(sub.toString());
  assertTrue(inode.isDirectory() && inode.asDirectory().isSnapshottable());
  hdfs.disallowSnapshot(sub);
  inode = fsdir.getINode(sub.toString());
  assertTrue(inode.asDirectory().isWithSnapshot());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link Snapshot#ID_COMPARATOR}: null compares equal to null, orders
 * after any concrete snapshot, and comparing two snapshots agrees in sign
 * with comparing their root names.
 */
@Test(timeout=300000) public void testIdCmp(){
  final PermissionStatus perm = PermissionStatus.createImmutable("user", "group", FsPermission.createImmutable((short)0));
  final INodeDirectory snapshottable = new INodeDirectory(0, DFSUtil.string2Bytes("foo"), perm, 0L);
  snapshottable.addSnapshottableFeature();
  // Duplicate id/name pairs on purpose so the "equal" branch is exercised.
  final Snapshot[] all = {new Snapshot(1, "s1", snapshottable), new Snapshot(1, "s1", snapshottable), new Snapshot(2, "s2", snapshottable), new Snapshot(2, "s2", snapshottable)};
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for (Snapshot left : all) {
    // null sorts after every concrete snapshot.
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, left) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(left, null) < 0);
    for (Snapshot right : all) {
      final int byName = left.getRoot().getLocalName().compareTo(right.getRoot().getLocalName());
      final int byId = Snapshot.ID_COMPARATOR.compare(left, right);
      // The comparator's sign must match the name comparison's sign.
      Assert.assertEquals(byName > 0, byId > 0);
      Assert.assertEquals(byName == 0, byId == 0);
      Assert.assertEquals(byName < 0, byId < 0);
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a snapshottable directory that already has snapshots must fail;
 * renaming a file OUT of it must leave a WithName reference in the snapshot
 * and a second reference at the destination, both sharing one WithCount
 * whose count drops when the destination copy is deleted.
 */
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr="/testRenameWithSnapshot";
final String abcStr=dirStr + "/abc";
final Path abc=new Path(abcStr);
hdfs.mkdirs(abc,new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo=new Path(abc,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(abc,"s0");
// Renaming the snapshottable dir itself is rejected once it has snapshots.
try {
hdfs.rename(abc,new Path(dirStr,"tmp"));
fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e);
}
final String xyzStr=dirStr + "/xyz";
final Path xyz=new Path(xyzStr);
hdfs.mkdirs(xyz,new FsPermission((short)0777));
final Path bar=new Path(xyz,"bar");
// Rename foo out of the snapshottable dir into a non-snapshottable one.
hdfs.rename(foo,bar);
// The snapshot copy becomes a WithName reference ...
final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
// ... sharing a WithCount with the destination reference (count == 2).
final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2,withCount.getReferenceCount());
final INode barRef=fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount,barRef.asReference().getReferredINode());
// Deleting the destination copy drops the reference count to 1.
hdfs.delete(bar,false);
Assert.assertEquals(1,withCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure we clean the whole subtree under a DstReference node after
 * deleting a snapshot: once s1 (the only snapshot retaining the renamed
 * subtree) is removed, the file under it must disappear and the s0 diff of
 * the original directory must be left empty. Regression test for HDFS-5476.
 */
@Test public void testCleanDstReference() throws Exception {
final Path test=new Path("/test");
final Path foo=new Path(test,"foo");
final Path bar=new Path(foo,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
// Create a file after s0, rename its ancestor, then capture it in s1.
final Path fileInBar=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(test,"foo2");
hdfs.rename(foo,foo2);
hdfs.createSnapshot(test,"s1");
// Delete the live copies; the file is then only reachable through s1.
hdfs.delete(new Path(foo2,"bar"),true);
hdfs.delete(foo2,true);
final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
// Deleting s1 must also clean up the subtree under the DstReference.
hdfs.deleteSnapshot(test,"s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
// The s0 view of bar has no children and an empty children diff.
final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar");
INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List diffList=barNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Here the renamed file/dir is created
 * BEFORE taking the snapshot. The target directory is replaced by a Mockito
 * spy whose addChild always fails, so the rename is forced onto its undo
 * path; the source directory and its snapshot diffs must be rolled back to
 * the pre-rename state and the destination must remain untouched.
 */
@Test public void testRenameUndo_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// Swap /dir2 for a spy that refuses addChild, forcing rename to fail.
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// After the failed rename, /dir1 still lists foo and the s1 diff is empty.
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo keeps its snapshot feature; its only diff still belongs to s1.
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// The snapshot copy of foo resolves to the very same inode.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
INode fooNode_s1=fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
// The destination saw nothing: no new child, no snapshot feature.
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Unit test for HDFS-4842: delete a file, snapshot, rename its ancestor
 * into another snapshottable directory, then delete an intermediate
 * snapshot. The deleted file must stay visible in the oldest snapshot (s0)
 * and vanish from s2, and the rename-related diff/reference bookkeeping on
 * both source and destination directories must stay consistent (also
 * across a cluster restart / fsimage reload).
 */
@Test public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
// s0 and s1 capture the file; it is deleted before s2 is taken on dir2.
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
SnapshotTestHelper.createSnapshot(hdfs,test,"s1");
hdfs.delete(file,true);
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Move foo from dir2 to dir1, then drop the middle snapshot.
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
hdfs.deleteSnapshot(test,"s1");
// The file was already gone when s2 was taken, but is still in s0.
final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file");
assertFalse(hdfs.exists(file_s2));
final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
// dir1's single diff records only the created (renamed-in) foo.
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List dir1DiffList=dir1Node.getDiffs().asList();
assertEquals(1,dir1DiffList.size());
List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,cList.size());
INode cNode=cList.get(0);
INode fooNode=fsdir.getINode4Write(newfoo.toString());
assertSame(cNode,fooNode);
// bar moved along with foo; its surviving diff is now associated with s0
// and still records the deleted "file".
final Path newbar=new Path(newfoo,bar.getName());
INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory();
assertSame(fooNode.asDirectory(),barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
DirectoryDiff diff=barDiffList.get(0);
INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory();
Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(),diff.getSnapshotId());
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
// dir2's diff records foo as deleted, via a WithName reference that shares
// the referred inode with the current (renamed) foo reference.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
List dir2DiffList=dir2Node.getDiffs().asList();
assertEquals(1,dir2DiffList.size());
dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1,dList.size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName());
INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0),fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode());
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file across snapshottable directories and then append to it: the
 * renamed inode must remain a DstReference both while the append is under
 * construction and after it completes, and the fsimage must survive a
 * cluster restart.
 */
@Test public void testRenameAndAppend() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, snap1);
  // Renaming out of a snapshotted dir turns the inode into a DstReference.
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  INode fooRef = fsdir.getINode4Write(foo2.toString());
  assertTrue(fooRef instanceof INodeReference.DstReference);
  // While appending, the file is under construction yet still referenced.
  try (FSDataOutputStream out = hdfs.append(foo2)) {
    byte[] content = new byte[1024];
    new Random().nextBytes(content);
    out.write(content);
    fooRef = fsdir.getINode4Write(foo2.toString());
    assertTrue(fooRef instanceof INodeReference.DstReference);
    INodeFile fooNode = fooRef.asFile();
    assertTrue(fooNode.isWithSnapshot());
    assertTrue(fooNode.isUnderConstruction());
  }
  // After close, construction is done but the reference remains.
  fooRef = fsdir.getINode4Write(foo2.toString());
  assertTrue(fooRef instanceof INodeReference.DstReference);
  INodeFile fooNode = fooRef.asFile();
  assertTrue(fooNode.isWithSnapshot());
  assertFalse(fooNode.isUnderConstruction());
  restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Unlike testRenameUndo_1, the renamed
 * file/dir is created AFTER taking the snapshot, so after the forced-failed
 * rename the rolled-back foo must appear in the CREATED list of s1's diff
 * and there must be no snapshot copy of it.
 */
@Test public void testRenameUndo_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// foo/bar are created only after s1 exists.
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
// Swap /dir2 for a spy that refuses addChild, forcing rename to fail.
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// /dir1 still lists foo, and s1's diff records it as CREATED (it did not
// exist when the snapshot was taken).
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
// The CREATED entry is the rolled-back foo itself (a plain directory).
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
// No snapshot copy exists, and the destination saw nothing.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
assertFalse(hdfs.exists(foo_s1));
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir and a file multiple times across snapshottable
 * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Only create snapshots in the beginning (before the renames). At every
 * step, only s1 (taken while the files lived in dir1) should see them, with
 * the original replication; the fsimage is re-verified after each rename
 * via restartClusterAndCheckImage.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar2_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED);
// All snapshots are taken up front, before any rename.
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// Rename pass 1: dir1 -> dir2, then lower replication on the live copies.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar2_dir2=new Path(sdir2,"bar");
hdfs.rename(bar2_dir1,bar2_dir2);
restartClusterAndCheckImage(true);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar2_dir2,REPL_1);
// Only s1 sees the files, and with the original replication REPL.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1");
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1,statusBar1.getReplication());
FileStatus statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1,statusBar2.getReplication());
// Rename pass 2: dir2 -> dir3, change replication again.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar2_dir3=new Path(sdir3,"bar");
hdfs.rename(bar2_dir2,bar2_dir3);
restartClusterAndCheckImage(true);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar2_dir3,REPL_2);
// Still only s1 sees the files; s2 and s3 see nothing.
final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1");
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2,statusBar2.getReplication());
// Rename pass 3: dir3 -> dir2, restore replication REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar2_dir3,bar2_dir2);
restartClusterAndCheckImage(true);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar2_dir2,REPL);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL,statusBar2.getReplication());
// Rename pass 4: back to dir1. Each current inode is a reference sharing a
// WithCount (count 2: current copy + s1's WithName copy) whose only diff
// belongs to s1.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar2_dir2,bar2_dir1);
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(2,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
assertEquals(1,foo.getDiffs().asList().size());
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1,bar1.getDiffs().asList().size());
assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(2,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
assertEquals(1,bar.getDiffs().asList().size());
assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
// Delete the live copies: s1 still retains them, and the reference counts
// drop to 1 (only the snapshot copies remain).
hdfs.delete(foo_dir1,true);
hdfs.delete(bar2_dir1,true);
restartClusterAndCheckImage(true);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
fooRef=fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWithCount.getReferenceCount());
barRef=fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(1,barWithCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of the second-time rename.
 *
 * A rename into /dir3 is forced to fail (via a mocked directory whose
 * addChild always returns false), and the test verifies that both the first
 * and the second failed rename leave the snapshot diffs and reference nodes
 * of the source tree exactly as they were before the attempt.
 */
@Test public void testRenameUndo_3() throws Exception {
  final Path sdir1=new Path("/dir1");
  final Path sdir2=new Path("/dir2");
  final Path sdir3=new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo=new Path(sdir1,"foo");
  final Path bar=new Path(foo,"bar");
  DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
  SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
  // Make /dir3 reject every addChild call so any rename into it fails and
  // exercises the rename-undo code path.
  INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3=spy(dir3);
  doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
  INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
  final Path foo_dir2=new Path(sdir2,"foo2");
  final Path foo_dir3=new Path(sdir3,"foo3");
  // First rename (into /dir2) succeeds; the second (into /dir3) must fail.
  hdfs.rename(foo,foo_dir2);
  boolean result=hdfs.rename(foo_dir2,foo_dir3);
  assertFalse(result);
  INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  // After the undo, /dir2 still holds one child and a single diff recorded
  // for snapshot s2 with exactly one CREATED entry.
  ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1,dir2Children.size());
  List dir2Diffs=dir2Node.getDiffs().asList();
  assertEquals(1,dir2Diffs.size());
  assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2");
  assertFalse(hdfs.exists(foo_s2));
  INode fooNode=fsdir.getINode4Write(foo_dir2.toString());
  // assertSame reports both references on failure, unlike the original
  // assertTrue(x == y) which only printed "expected true".
  assertSame(fooNode,childrenDiff.getList(ListType.CREATED).get(0));
  assertTrue(fooNode instanceof INodeReference.DstReference);
  List fooDiffs=fooNode.asDirectory().getDiffs().asList();
  assertEquals(1,fooDiffs.size());
  assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
  // Take another snapshot and retry the failing rename: the undo must also
  // restore the state captured by the new snapshot s3.
  hdfs.createSnapshot(sdir2,"s3");
  result=hdfs.rename(foo_dir2,foo_dir3);
  assertFalse(result);
  dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
  fooNode=fsdir.getINode4Write(foo_dir2.toString());
  dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1,dir2Children.size());
  dir2Diffs=dir2Node.getDiffs().asList();
  assertEquals(2,dir2Diffs.size());
  assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
  assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId());
  childrenDiff=dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
  assertSame(fooNode,childrenDiff.getList(ListType.CREATED).get(0));
  childrenDiff=dir2Diffs.get(1).getChildrenDiff();
  assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2");
  assertFalse(hdfs.exists(foo_s2));
  assertTrue(hdfs.exists(foo_s3));
  assertTrue(fooNode instanceof INodeReference.DstReference);
  fooDiffs=fooNode.asDirectory().getDiffs().asList();
  assertEquals(2,fooDiffs.size());
  assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
  assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId());
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test public void testRenameUndo_4() throws Exception {
// Layout: /dir1/foo/bar (file), /dir2/foo2 (empty dir), /dir3 (empty dir).
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(sdir2,"foo2");
hdfs.mkdirs(foo2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo2 (captured by snapshot s2) into /dir3 so the inode at
// /dir3/foo3 becomes a reference node -- the dst to be overwritten below.
final Path foo3=new Path(sdir3,"foo3");
hdfs.rename(foo2,foo3);
INode foo3Node=fsdir.getINode4Write(foo3.toString());
assertTrue(foo3Node.isReference());
// Replace dir3 with a spy whose addChild fails on the first non-null call
// (then delegates to the real method), forcing the overwrite-rename into
// its undo path after the dst reference node has been removed.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)Mockito.isNull(),anyBoolean(),Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode)Mockito.isNotNull(),anyBoolean(),Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
try {
hdfs.rename(foo,foo3,Rename.OVERWRITE);
fail("the rename from " + foo + " to "+ foo3+ " should fail");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("rename from " + foo + " to "+ foo3+ " failed.",e);
}
// After the failed rename the overwritten reference node must have been
// restored: same inode instance, reference count of 2, and the node is
// still registered as the parent reference of its WithCount.
final INode foo3Node_undo=fsdir.getINode4Write(foo3.toString());
assertSame(foo3Node,foo3Node_undo);
INodeReference.WithCount foo3_wc=(WithCount)foo3Node.asReference().getReferredINode();
assertEquals(2,foo3_wc.getReferenceCount());
assertSame(foo3Node,foo3_wc.getParentReference());
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file under a snapshottable directory, file does not exist
 * in a snapshot.
 *
 * Because the file was created after snapshot snap1 was taken, the diff
 * report must show a plain CREATE of the new name (plus the implicit MODIFY
 * of the snapshot root) rather than a RENAME entry.
 */
@Test(timeout=60000) public void testRenameFileNotInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1,snap1);
  // File is created only AFTER the snapshot, then renamed.
  DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPL,SEED);
  hdfs.rename(file1,file2);
  SnapshotDiffReport diffReport=hdfs.getSnapshotDiffReport(sub1,snap1,"");
  List entries=diffReport.getDiffList();
  // assertEquals prints expected vs. actual on failure, unlike the original
  // assertTrue(entries.size() == 2) which only reported "expected true".
  assertEquals(2,entries.size());
  assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
  assertTrue(existsInDiffReport(entries,DiffType.CREATE,file2.getName(),null));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename /dir1/foo into /dir2, add two new files under it, snapshot the
// destination tree (s3), rename the dir back, then delete s3.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.rename(foo2,foo);
hdfs.deleteSnapshot(sdir2,"s3");
// Check namespace quota usage of each tree after the round trip; the
// asserted constants are the expected inode counts.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo in snapshot s1 must be a WithName reference whose WithCount (count 2)
// is shared with the live /dir1/foo, and the current children list must
// contain all three files (bar plus the two created after the rename).
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
final INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(2,wc.getReferenceCount());
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
assertEquals(bar2.getName(),children.get(1).getLocalName());
assertEquals(bar3.getName(),children.get(2).getLocalName());
// Only the diff recorded for snapshot s1 should remain after s3's deletion.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(2,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// The live /dir1/foo is the DstReference side of the same WithCount node.
final INode fooRef2=fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode();
assertSame(wc,wc2);
assertSame(fooRef2,wc.getParentReference());
// Verify the namesystem image survives a save/restart with this state.
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
// Setup: /dir1/foo/bar1 and /dir1/bar are files; /dir2 and /dir3 are empty.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// Leg 1: /dir1 -> /dir2, then change replication to REPL_1 so each later
// snapshot records a distinct file state.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar_dir2=new Path(sdir2,"bar");
hdfs.rename(bar_dir1,bar_dir2);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar_dir2,REPL_1);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33");
// Leg 2: /dir2 -> /dir3, replication to REPL_2.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar_dir3=new Path(sdir3,"bar");
hdfs.rename(bar_dir2,bar_dir3);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar_dir3,REPL_2);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333");
// Every snapshot taken while the files lived in a given dir must still
// resolve them, each with the replication in force at snapshot time.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1");
final Path bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1");
final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar");
final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
FileStatus statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
// Leg 3: back /dir3 -> /dir2, replication back to REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar_dir3,bar_dir2);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar_dir2,REPL);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222");
// Re-verify all older snapshot views plus the new s2222 ones.
final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1");
final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL,statusBar1.getReplication());
statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir2);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s2222);
assertEquals(REPL,statusBar.getReplication());
// Leg 4: back /dir2 -> /dir1, completing the round trip.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar_dir2,bar_dir1);
// Inode-level checks: each renamed inode is now a reference with one
// WithName per snapshot plus the DstReference (asserted count 5), and its
// diff list carries one entry per snapshot that captured it, oldest first.
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory();
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(5,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
List fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(5,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
List barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Delete the current files: snapshot copies must survive, the reference
// counts drop by one (the DstReference), and the diff lists are retained.
hdfs.delete(foo_dir1,true);
hdfs.delete(bar_dir1,true);
restartClusterAndCheckImage(true);
final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1");
final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo");
fooRef=fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(4,fooWithCount.getReferenceCount());
foo=fooWithCount.asDirectory();
fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
barRef=fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(4,barWithCount.getReferenceCount());
bar=barWithCount.asFile();
barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 *
 * The rename must fail and be fully rolled back: the source subtree is
 * restored unchanged and the destination tree's structure, snapshot diffs,
 * and quota usage are unaffected.
 */
@Test public void testRenameUndo_5() throws Exception {
  final Path test=new Path("/test");
  final Path dir1=new Path(test,"dir1");
  final Path dir2=new Path(test,"dir2");
  final Path subdir2=new Path(dir2,"subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  final Path foo=new Path(dir1,"foo");
  final Path bar=new Path(foo,"bar");
  DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
  SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
  // Cap dir2's namespace quota so the rename below cannot fit and must be
  // rolled back.
  hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
  final Path foo2=new Path(subdir2,foo.getName());
  boolean rename=hdfs.rename(foo,foo2);
  assertFalse(rename);
  // The undo must leave the source tree untouched.
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
  List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1,childrenList.size());
  INode fooNode=childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode=fsdir.getINode4Write(bar.toString());
  // bar must be restored as exactly INodeFile (not a subclass); assertSame
  // reports both classes on failure, unlike the original assertTrue on ==.
  assertSame(INodeFile.class,barNode.getClass());
  assertSame(fooNode,barNode.getParent());
  List diffList=dir1Node.getDiffs().asList();
  assertEquals(1,diffList.size());
  DirectoryDiff diff=diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  // Destination tree: structure, quota usage, and diffs must be unchanged.
  INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  Quota.Counts counts=dir2Node.computeQuotaUsage();
  assertEquals(3,counts.get(Quota.NAMESPACE));
  assertEquals(0,counts.get(Quota.DISKSPACE));
  childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1,childrenList.size());
  INode subdir2Node=childrenList.get(0);
  assertSame(dir2Node,subdir2Node.getParent());
  assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
  diffList=dir2Node.getDiffs().asList();
  assertEquals(1,diffList.size());
  diff=diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo into /dir2, create bar2 and bar3 under it, snapshot (s3),
// then delete the renamed dir and the snapshot that captured bar2/bar3.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.delete(foo2,true);
hdfs.deleteSnapshot(sdir2,"s3");
// Quota usage after the cleanup; the asserted constants are the expected
// remaining inode counts for each tree.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo survives only through snapshot s1: a WithName reference with a single
// remaining reference and only its original child bar.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(1,wc.getReferenceCount());
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
// Only the s1 diff remains, and it records no child changes.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(0,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// Verify the namesystem image survives a save/restart with this state.
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Rename and deletion snapshot under the same the snapshottable directory.
 */
@Test public void testRenameDirAndDeleteSnapshot_6() throws Exception {
// Layout under the single snapshottable root /test:
// /test/dir1 (empty) and /test/dir2/foo/bar/file.
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
// Snapshot s0 captures the whole tree, then the file is deleted and foo is
// renamed within the same snapshottable directory.
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
hdfs.delete(file,true);
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
final Path foo_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo");
assertTrue("the snapshot path " + foo_s0 + " should exist",hdfs.exists(foo_s0));
// Deleting s0 must remove the snapshot copy and clear every diff list that
// existed solely because of s0.
hdfs.deleteSnapshot(test,"s0");
assertFalse("after deleting s0, " + foo_s0 + " should not exist",hdfs.exists(foo_s0));
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue("the diff list of " + dir2 + " should be empty after deleting s0",dir2Node.getDiffs().asList().isEmpty());
assertTrue(hdfs.exists(newfoo));
// The renamed dir is still a DstReference, but with no remaining diffs on
// itself or its child after the snapshot removal.
INode fooRefNode=fsdir.getINode4Write(newfoo.toString());
assertTrue(fooRefNode instanceof INodeReference.DstReference);
INodeDirectory fooNode=fooRefNode.asDirectory();
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
INodeDirectory barNode=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0).asDirectory();
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// Verify the namesystem image survives a save/restart with this state.
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 *
 * An OVERWRITE rename into a quota-limited destination is issued; afterwards
 * the destination directory must still be snapshottable and its quota usage
 * must match the expected namespace/diskspace counts.
 */
@Test public void testRenameExceedQuota() throws Exception {
  final Path testRoot=new Path("/test");
  final Path srcDir=new Path(testRoot,"dir1");
  final Path dstDir=new Path(testRoot,"dir2");
  final Path dstSub=new Path(dstDir,"subdir");
  final Path dstFile=new Path(dstSub,"subfile");
  hdfs.mkdirs(srcDir);
  DFSTestUtil.createFile(hdfs,dstFile,BLOCKSIZE,REPL,SEED);
  final Path srcFile=new Path(srcDir,"foo");
  DFSTestUtil.createFile(hdfs,srcFile,BLOCKSIZE,REPL,SEED);
  SnapshotTestHelper.createSnapshot(hdfs,srcDir,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,dstDir,"s2");
  // Put a tight namespace quota on the destination tree before the rename.
  hdfs.setQuota(dstDir,5,Long.MAX_VALUE - 1);
  hdfs.rename(srcFile,dstFile,Rename.OVERWRITE);
  INode dstDirNode=fsdir.getINode4Write(dstDir.toString());
  assertTrue(dstDirNode.asDirectory().isSnapshottable());
  // Expected usage: 7 namespace entries and two file replicas' worth of
  // disk space under the destination.
  Quota.Counts usage=dstDirNode.computeQuotaUsage();
  assertEquals(7,usage.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2,usage.get(Quota.DISKSPACE));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test the rename undo when removing dst node fails
 *
 * A namespace quota on the destination makes the overwrite rename throw a
 * QuotaExceededException; the test then verifies both the source and the
 * destination trees were restored exactly.
 */
@Test public void testRenameUndo_6() throws Exception {
  final Path test=new Path("/test");
  final Path dir1=new Path(test,"dir1");
  final Path dir2=new Path(test,"dir2");
  final Path sub_dir2=new Path(dir2,"subdir");
  final Path subsub_dir2=new Path(sub_dir2,"subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  final Path foo=new Path(dir1,"foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
  // Quota of 4 cannot accommodate the overwrite rename below.
  hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
  try {
    hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  }
  catch ( QuotaExceededException e) {
    String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5";
    GenericTestUtils.assertExceptionContains(msg,e);
  }
  // Source tree must be fully restored.
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
  List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1,childrenList.size());
  INode fooNode=childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node,fooNode.getParent());
  List diffList=dir1Node.getDiffs().asList();
  assertEquals(1,diffList.size());
  DirectoryDiff diff=diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  // Destination tree: structure, parent links, quota usage, and diffs must
  // be unchanged after the undo.
  INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  Quota.Counts counts=dir2Node.computeQuotaUsage();
  assertEquals(4,counts.get(Quota.NAMESPACE));
  assertEquals(0,counts.get(Quota.DISKSPACE));
  childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1,childrenList.size());
  INode subdir2Node=childrenList.get(0);
  assertTrue(subdir2Node.asDirectory().isWithSnapshot());
  assertSame(dir2Node,subdir2Node.getParent());
  assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString());
  // The overwritten target must be exactly an INodeDirectory (not a
  // snapshot-aware subclass); assertSame reports both classes on failure,
  // unlike the original assertTrue on a == comparison.
  assertSame(INodeDirectory.class,subsubdir2Node.getClass());
  assertSame(subdir2Node,subsubdir2Node.getParent());
  diffList=dir2Node.getDiffs().asList();
  assertEquals(1,diffList.size());
  diff=diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  diffList=subdir2Node.asDirectory().getDiffs().asList();
  assertEquals(0,diffList.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Renames a file inside a sub-directory of a snapshotted directory and
 * checks that the diff report shows a MODIFY of the sub-directory plus a
 * RENAME entry for the file.
 */
@Test(timeout=60000) public void testRenameFileInSubDirOfDirWithSnapshot() throws Exception {
  final Path sub2=new Path(sub1,"sub2");
  final Path renameSrc=new Path(sub2,"sub2file1");
  final Path renameDst=new Path(sub2,"sub2file2");
  final String snapName="sub1snap1";
  hdfs.mkdirs(sub1);
  hdfs.mkdirs(sub2);
  DFSTestUtil.createFile(hdfs,renameSrc,BLOCKSIZE,REPL,SEED);
  // Snapshot first, then rename, so the rename is visible in the diff.
  SnapshotTestHelper.createSnapshot(hdfs,sub1,snapName);
  hdfs.rename(renameSrc,renameDst);
  SnapshotDiffReport report=hdfs.getSnapshotDiffReport(sub1,snapName,"");
  LOG.info("DiffList is \n\"" + report.toString() + "\"");
  List diffEntries=report.getDiffList();
  assertTrue(existsInDiffReport(diffEntries,DiffType.MODIFY,sub2.getName(),null));
  assertTrue(existsInDiffReport(diffEntries,DiffType.RENAME,sub2.getName() + "/" + renameSrc.getName(),sub2.getName() + "/" + renameDst.getName()));
}
InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Demonstrates that {@link INodeDirectory#removeChild(INode,Snapshot)} and
 * {@link INodeDirectory#addChild(INode,boolean,Snapshot)} should rely on
 * {@link INode#isInLatestSnapshot(Snapshot)} to decide whether an
 * added/removed child must be recorded in snapshots.
 */
@Test public void testRenameDirAndDeleteSnapshot_5() throws Exception {
  final Path dir1=new Path("/dir1");
  final Path dir2=new Path("/dir2");
  final Path dir3=new Path("/dir3");
  for (Path dir : new Path[]{dir1, dir2, dir3}) {
    hdfs.mkdirs(dir);
  }
  final Path foo=new Path(dir1,"foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
  final Path bar=new Path(foo,"bar");
  DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
  // Drop s1 so that foo/bar is no longer held by any snapshot of dir1.
  hdfs.deleteSnapshot(dir1,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
  // Move foo under dir2, then pull bar out into dir3 and delete foo.
  final Path foo2=new Path(dir2,foo.getName());
  hdfs.rename(foo,foo2);
  final Path bar2=new Path(dir2,"foo/bar");
  final Path bar3=new Path(dir3,"bar");
  hdfs.rename(bar2,bar3);
  hdfs.delete(foo2,true);
  // bar must survive under dir3 with dir3 as its parent inode.
  assertTrue(hdfs.exists(bar3));
  INodeFile barNode=(INodeFile)fsdir.getINode4Write(bar3.toString());
  assertSame(fsdir.getINode4Write(dir3.toString()),barNode.getParent());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename to an invalid name (xxx/.snapshot)
 */
@Test public void testRenameUndo_7() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,root,snap1);
// ".snapshot" is a reserved name, so renaming bar onto foo/.snapshot must
// be rejected and fully undone.
final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar,invalid);
fail("expect exception since invalid name is used for rename");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
}
// After the failed rename: foo still has its single child bar, exactly one
// diff (for snap1) that records no child changes, and bar keeps its single
// file diff for snap1.
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(),diff.getSnapshotId());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode,children.get(0));
assertSame(fooNode,barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
FileDiff barDiff=barDiffList.get(0);
assertEquals(s1.getId(),barDiff.getSnapshotId());
// Save the namespace, restart on the saved image (no reformat), and verify
// the fsimage still loads and matches after the failed rename.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renames a single file from one snapshottable directory into another and
 * verifies the snapshot views on both sides of the rename.
 */
@Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1=new Path("/dir1");
  final Path sdir2=new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path srcFile=new Path(sdir2,"foo");
  DFSTestUtil.createFile(hdfs,srcFile,BLOCKSIZE,REPL,SEED);
  SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
  SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
  hdfs.createSnapshot(sdir1,"s3");
  final Path dstFile=new Path(sdir1,"foo");
  hdfs.rename(srcFile,dstFile);
  hdfs.setReplication(dstFile,REPL_1);
  // s2 was taken while the file lived in sdir2: it must still show the file
  // with its original replication factor.
  final Path fileInS2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
  assertTrue(hdfs.exists(fileInS2));
  FileStatus stat=hdfs.getFileStatus(fileInS2);
  assertEquals(REPL,stat.getReplication());
  // s3 was taken on sdir1 before the rename, so the file is absent there.
  final Path fileInS3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
  assertFalse(hdfs.exists(fileInS3));
  // The renamed inode's latest file diff must belong to snapshot s2.
  INodeDirectory dir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot snap2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile renamedNode=fsdir.getINode(dstFile.toString()).asFile();
  assertEquals(snap2.getId(),renamedNode.getDiffs().getLastSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following steps:
 *
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 *
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Modify foo's subtree after s2 so foo carries snapshot diffs (step 3).
hdfs.setReplication(bar2,REPL_1);
hdfs.delete(bar,true);
hdfs.createSnapshot(sdir1,"s3");
// Step 5: rename /dir2/foo to /dir1/foo.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// bar was deleted after s2, so it remains visible through s2.
final Path snapshotBar=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(snapshotBar));
// Delete the live bar2 after the rename.
final Path newBar2=new Path(newfoo,"bar2");
assertTrue(hdfs.exists(newBar2));
hdfs.delete(newBar2,true);
// s2 still shows bar2 with the replication it had when s2 was taken.
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
FileStatus status=hdfs.getFileStatus(bar2_s2);
assertEquals(REPL,status.getReplication());
// s3 on sdir1 was taken before the rename, so foo/bar2 is absent there.
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file twice across two snapshots and verify that each snapshot
 * diff report records the rename relative to the requested snapshot range:
 * s1->s2 sees file1->file2, s2->current sees file2->file3, and
 * s1->current collapses the chain into file1->file3.
 */
@Test(timeout=60000) public void testRenameTwiceInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sub1, snap1);
  hdfs.rename(file1, file2);
  hdfs.createSnapshot(sub1, snap2);
  hdfs.rename(file2, file3);
  SnapshotDiffReport diffReport;
  // snap1 -> snap2: only the first rename is visible.
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
  LOG.info("DiffList is " + diffReport.toString());
  List entries = diffReport.getDiffList();
  // assertEquals reports the actual size on failure (assertTrue does not),
  // matching the style used by the other diff-report tests in this class.
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(), file2.getName()));
  // snap2 -> current state: only the second rename is visible.
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(), file3.getName()));
  // snap1 -> current state: the two renames collapse into one entry.
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(), file3.getName()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After rename, delete the snapshot in src
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
// s1 on the rename destination; s2 and s3 on the rename source.
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3");
// Move foo/ from /dir2 to /dir1 and check the saved image still loads.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
restartClusterAndCheckImage(true);
// Create bar2 after the rename, capture it in s4, then delete the subtree.
final Path bar2=new Path(newfoo,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newfoo,true);
// Both files remain visible through s4.
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
assertTrue(hdfs.exists(bar_s4));
// Delete s4 (the snapshot taken in the destination after the rename).
hdfs.deleteSnapshot(sdir1,"s4");
restartClusterAndCheckImage(true);
// bar is now reachable only through the source dir's snapshots.
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertTrue(hdfs.exists(bar_s3));
// bar2 was created after s3, so it appears in neither s3 path.
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
// Delete s3 in the source dir; s2 still holds the old foo/bar.
hdfs.deleteSnapshot(sdir2,"s3");
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
// Inspect foo's reference structure in s2: a WithName reference whose
// referred WithCount node has exactly one remaining reference, and the
// referred directory keeps a single diff associated with s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWC.getReferenceCount());
INodeDirectory fooDir=fooWC.getReferredINode().asDirectory();
List diffs=fooDir.getDiffs().asList();
assertEquals(1,diffs.size());
assertEquals(s2.getId(),diffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Deleting s2 removes the last snapshot copy of bar.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
// After the data is gone only namespace entries remain (no disk usage).
Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
}
InternalCallVerifier BooleanVerifier
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir
 */
@Test(timeout=60000) public void testRenameFromNonSDir2SDir() throws Exception {
  // Build /dir1/foo/bar, where /dir1 is NOT snapshottable.
  final Path srcParent = new Path("/dir1");
  final Path dstParent = new Path("/dir2");
  hdfs.mkdirs(srcParent);
  hdfs.mkdirs(dstParent);
  final Path srcFoo = new Path(srcParent, "foo");
  final Path barFile = new Path(srcFoo, "bar");
  DFSTestUtil.createFile(hdfs, barFile, BLOCKSIZE, REPL, SEED);
  // Only the destination parent is snapshottable.
  SnapshotTestHelper.createSnapshot(hdfs, dstParent, snap1);
  // Move the subtree into the snapshottable directory.
  final Path dstFoo = new Path(dstParent, "foo");
  hdfs.rename(srcFoo, dstFoo);
  // The moved inode should resolve as a directory inode.
  final INode movedNode = fsdir.getINode4Write(dstFoo.toString());
  assertTrue(movedNode instanceof INodeDirectory);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Perform a chain of renames (including one with Rename.OVERWRITE) after
 * taking a snapshot, and verify the resulting snapshot diff report entries.
 */
@Test public void testRenameWithOverWrite() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path file1InFoo=new Path(foo,"file1");
final Path file2InFoo=new Path(foo,"file2");
final Path file3InFoo=new Path(foo,"file3");
DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED);
final Path bar=new Path(root,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,root,"s0");
// foo/file1 -> bar/file1
final Path fileInBar=new Path(bar,"file1");
hdfs.rename(file1InFoo,fileInBar);
// bar -> newDir (carrying bar/file1 along)
final Path newDir=new Path(root,"newDir");
hdfs.rename(bar,newDir);
// foo/file2 -> newDir/file2
final Path file2InNewDir=new Path(newDir,"file2");
hdfs.rename(file2InFoo,file2InNewDir);
// foo/file3 overwrites newDir/file1 (the file moved in the first rename).
final Path file1InNewDir=new Path(newDir,"file1");
hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
// In the s0->s1 report the overwritten file1 shows up as a DELETE rather
// than a rename, while the other moves are reported as RENAME entries.
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List entries=report.getDiffList();
assertEquals(7,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test renaming a file and then delete snapshots.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Move foo into sdir1, then change replication around snapshots s4/s5 so
// each snapshot pins a different replication factor.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
hdfs.createSnapshot(sdir1,"s4");
hdfs.setReplication(newfoo,REPL_2);
FileStatus status=hdfs.getFileStatus(newfoo);
assertEquals(REPL_2,status.getReplication());
final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo");
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.createSnapshot(sdir1,"s5");
final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo");
status=hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2,status.getReplication());
// Delete s5; s4's copy must keep reporting REPL_1.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
// Delete s4; foo is then visible only through sdir2's s2 (pre-rename).
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(foo_s4));
Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// The renamed file should retain a single diff associated with s2.
INodeFile snode=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1,snode.getDiffs().asList().size());
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
// Delete the remaining snapshots one by one, verifying the image each time.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(foo_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test renaming a dir and then delete snapshots.
 */
@Test public void testRenameDirAndDeleteSnapshot_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Move foo/ into sdir1 and add a third file under the renamed dir.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
final Path newbar=new Path(newfoo,bar.getName());
final Path newbar2=new Path(newfoo,bar2.getName());
final Path newbar3=new Path(newfoo,"bar3");
DFSTestUtil.createFile(hdfs,newbar3,BLOCKSIZE,REPL,SEED);
// s4 captures bar, bar2 and bar3; then delete bar and bar3 live copies.
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newbar,true);
hdfs.delete(newbar3,true);
assertFalse(hdfs.exists(newbar3));
assertFalse(hdfs.exists(bar));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
final Path bar3_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar3");
assertTrue(hdfs.exists(bar_s4));
assertTrue(hdfs.exists(bar3_s4));
// s5 captures bar2; then delete the live bar2.
hdfs.createSnapshot(sdir1,"s5");
hdfs.delete(newbar2,true);
assertFalse(hdfs.exists(bar2));
final Path bar2_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo/bar2");
assertTrue(hdfs.exists(bar2_s5));
// Delete s5: bar2 must fall back to being visible only through s4.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(bar2_s5));
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
// Delete s4: bar/bar2 then only exist in sdir2's s2 (taken before the
// rename), and bar3 (created after s2/s3) exists in no snapshot at all.
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(bar_s4));
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s4));
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar3_s4));
Path bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
final Path bar3_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar3");
assertFalse(hdfs.exists(bar3_s2));
restartClusterAndCheckImage(true);
// Remove the remaining snapshots, verifying the saved image after each.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file under a snapshottable directory, file exists
 * in a snapshot.
 */
@Test public void testRenameFileInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sub1, snap1);
  // Rename after the snapshot; the diff report should record it.
  hdfs.rename(file1, file2);
  SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
  // Use the test logger rather than System.out, consistent with the other
  // diff-report tests in this class.
  LOG.info("DiffList is " + diffReport.toString());
  List entries = diffReport.getDiffList();
  // assertEquals reports the actual size on failure, unlike assertTrue.
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(), file2.getName()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename a directory under a snapshottable directory and verify the rename
 * shows up in the snapshot diff report.
 */
@Test(timeout=60000) public void testRenameDirectoryInSnapshot() throws Exception {
  final Path renameSrc = new Path(sub1, "sub2");
  final Path renameDst = new Path(sub1, "sub3");
  final Path fileInSrc = new Path(renameSrc, "sub2file1");
  final String snapshotName = "sub1snap1";
  hdfs.mkdirs(sub1);
  hdfs.mkdirs(renameSrc);
  DFSTestUtil.createFile(hdfs, fileInSrc, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshotName);
  // Rename the directory after the snapshot was taken.
  hdfs.rename(renameSrc, renameDst);
  // Diff against the current state: one MODIFY on the root plus the RENAME.
  SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snapshotName, "");
  LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
  List entries = diffReport.getDiffList();
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, renameSrc.getName(), renameDst.getName()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
@Test public void testClearQuota() throws Exception {
final Path dir=new Path("/TestSnapshot");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);
// Quota changes before any snapshot exists must not create diffs.
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET,HdfsConstants.QUOTA_DONT_SET);
INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET - 1,HdfsConstants.QUOTA_DONT_SET - 1);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// Clearing the quota (QUOTA_RESET) before any snapshot: still no diffs.
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// After snapshot s1, clearing the quota records one directory diff.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(1,dirNode.getDiffs().asList().size());
SnapshottableDirectoryStatus[] status=hdfs.getSnapshottableDirListing();
assertEquals(1,status.length);
assertEquals(dir,status[0].getFullPath());
// Create sub/ before s2 and a file inside it after s2, then clear quota.
final Path subDir=new Path(dir,"sub");
hdfs.mkdirs(subDir);
hdfs.createSnapshot(dir,"s2");
final Path file=new Path(subDir,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
// The subdirectory tracks one diff for s2 whose CREATED list contains
// exactly the file added after the snapshot.
INode subNode=fsdir.getINode4Write(subDir.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
List diffList=subNode.asDirectory().getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s2=dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),diffList.get(0).getSnapshotId());
List createdList=diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,createdList.size());
assertSame(fsdir.getINode4Write(file.toString()),createdList.get(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Setting a quota on a directory created after a snapshot must mark it as
 * quota-bearing without turning it into a snapshot-tracked directory.
 */
@Test(timeout=60000) public void testSetQuota() throws Exception {
  final Path snapshotDir = new Path("/TestSnapshot");
  hdfs.mkdirs(snapshotDir);
  SnapshotTestHelper.createSnapshot(hdfs, snapshotDir, "s1");
  // "sub" and its file are created only after s1 was taken.
  final Path subDir = new Path(snapshotDir, "sub");
  hdfs.mkdirs(subDir);
  final Path fileUnderSub = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, fileUnderSub, BLOCKSIZE, REPLICATION, seed);
  INodeDirectory subDirNode = INodeDirectory.valueOf(fsdir.getINode(subDir.toString()), subDir);
  assertFalse(subDirNode.isWithSnapshot());
  // Apply a quota and re-resolve the inode.
  hdfs.setQuota(subDir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  subDirNode = INodeDirectory.valueOf(fsdir.getINode(subDir.toString()), subDir);
  assertTrue(subDirNode.isQuotaSet());
  assertFalse(subDirNode.isWithSnapshot());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
 * they are idempotent
 */
@Test public void testAllowAndDisallowSnapshot() throws Exception {
final Path dir=new Path("/dir");
final Path file0=new Path(dir,"file0");
final Path file1=new Path(dir,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// allowSnapshot twice: repeated calls keep the dir snapshottable.
hdfs.allowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
hdfs.allowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
// disallowSnapshot twice: repeated calls keep it non-snapshottable.
hdfs.disallowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
hdfs.disallowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// The root directory stays snapshottable throughout; allow/disallow only
// toggles its snapshot quota between 0 and SNAPSHOT_LIMIT.
final Path root=new Path("/");
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.disallowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.disallowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
InternalCallVerifier BooleanVerifier
/**
 * Snapshot a file, rename it into a second snapshottable directory, snapshot
 * that too, delete the live copy, checkpoint and restart the NameNode, then
 * verify both snapshot copies are still readable.
 */
@Test(timeout=30000) public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
  final Path srcDir = new Path("/foo");
  final Path dstDir = new Path("/foo2");
  hdfs.mkdirs(srcDir);
  hdfs.mkdirs(dstDir);
  hdfs.allowSnapshot(srcDir);
  hdfs.allowSnapshot(dstDir);
  final Path srcFile = new Path(srcDir, "bar");
  final Path dstFile = new Path(dstDir, "bar");
  DFSTestUtil.createFile(hdfs, srcFile, 100, (short)2, 100024L);
  // Snapshot the source, move the file, snapshot the destination.
  hdfs.createSnapshot(srcDir, "s1");
  assertTrue(hdfs.rename(srcFile, dstFile));
  hdfs.createSnapshot(dstDir, "s2");
  // Remove the live copy; only the snapshot copies remain.
  assertTrue(hdfs.delete(dstFile, true));
  // Checkpoint and restart so the namespace is reloaded from the image.
  final NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  // Both snapshot paths must still resolve and be readable.
  DFSTestUtil.readFile(hdfs, new Path(Snapshot.getSnapshotPath(srcDir.toString(), "s1/bar")));
  DFSTestUtil.readFile(hdfs, new Path(Snapshot.getSnapshotPath(dstDir.toString(), "s2/bar")));
}
InternalCallVerifier BooleanVerifier
/**
 * Snapshot a file, delete the live copy, checkpoint and restart the
 * NameNode, then verify the snapshot copy is still readable.
 */
@Test(timeout=30000) public void testReadSnapshotFileWithCheckpoint() throws Exception {
  final Path snapshotDir = new Path("/foo");
  hdfs.mkdirs(snapshotDir);
  hdfs.allowSnapshot(snapshotDir);
  final Path file = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, file, 100, (short)2, 100024L);
  hdfs.createSnapshot(snapshotDir, "s1");
  // Remove the live copy; the snapshot copy must survive.
  assertTrue(hdfs.delete(file, true));
  // Checkpoint and restart so the namespace is reloaded from the image.
  final NameNode nn = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nn, false);
  NameNodeAdapter.saveNamespace(nn);
  NameNodeAdapter.leaveSafeMode(nn);
  cluster.restartNameNode(true);
  // The file must still be readable through the snapshot path.
  final String snapshotPath = Snapshot.getSnapshotPath(snapshotDir.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * Test deleting a file with snapshots. Need to check the blocksMap to make
 * sure the corresponding record is updated correctly.
 */
@Test(timeout=60000) public void testDeletionWithSnapshots() throws Exception {
Path file0=new Path(sub1,"file0");
Path file1=new Path(sub1,"file1");
Path sub2=new Path(sub1,"sub2");
Path file2=new Path(sub2,"file2");
Path file3=new Path(sub1,"file3");
Path file4=new Path(sub1,"file4");
Path file5=new Path(sub1,"file5");
DFSTestUtil.createFile(hdfs,file0,4 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,2 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file2,3 * BLOCKSIZE,REPLICATION,seed);
// Deleting a file covered by no snapshot removes its blocks from the
// blocksMap immediately.
{
final INodeFile f2=assertBlockCollection(file2.toString(),3,fsdir,blockmanager);
BlockInfo[] blocks=f2.getBlocks();
hdfs.delete(sub2,true);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}
// Interleave file creation with snapshots s0/s1/s2.
final String[] snapshots={"s0","s1","s2"};
DFSTestUtil.createFile(hdfs,file3,5 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[0]);
DFSTestUtil.createFile(hdfs,file4,1 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[1]);
DFSTestUtil.createFile(hdfs,file5,7 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[2]);
// A metadata change (setReplication to the same value) after snapshots
// marks the file as snapshot-tracked without changing its blocks.
{
INodeFile f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
Assert.assertSame(INodeFile.class,f1.getClass());
hdfs.setReplication(file1,(short)2);
f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
assertTrue(f1.isWithSnapshot());
assertFalse(f1.isUnderConstruction());
}
// Deleting a file that still exists in snapshots keeps its blocks alive.
final INodeFile f0=assertBlockCollection(file0.toString(),4,fsdir,blockmanager);
BlockInfo[] blocks0=f0.getBlocks();
Path snapshotFile0=SnapshotTestHelper.getSnapshotPath(sub1,"s0",file0.getName());
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
hdfs.delete(file0,true);
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
String s1f0=SnapshotTestHelper.getSnapshotPath(sub1,"s1",file0.getName()).toString();
assertBlockCollection(s1f0,4,fsdir,blockmanager);
// Deleting snapshot s1 must not free blocks still referenced via s0.
hdfs.deleteSnapshot(sub1,"s1");
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
// Resolving the file through the deleted snapshot's path must fail.
try {
INodeFile.valueOf(fsdir.getINode(s1f0),s1f0);
fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
}
catch ( IOException e) {
assertExceptionContains("File does not exist: " + s1f0,e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When combine two snapshots, make sure files/directories created after the
 * prior snapshot get destroyed.
 */
@Test(timeout=300000) public void testCombineSnapshotDiff3() throws Exception {
Path dir=new Path("/dir");
Path subDir1=new Path(dir,"subdir1");
Path subDir2=new Path(dir,"subdir2");
hdfs.mkdirs(subDir2);
Path subsubDir=new Path(subDir1,"subsubdir");
hdfs.mkdirs(subsubDir);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
// Create new files after s1 so they belong only to the s1->s2 interval.
Path newDir=new Path(subsubDir,"newdir");
Path newFile=new Path(newDir,"newfile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
Path newFile2=new Path(subDir2,"newfile");
DFSTestUtil.createFile(hdfs,newFile2,BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s2");
checkQuotaUsageComputation(dir,11,BLOCKSIZE * 2 * REPLICATION);
// Delete the subtrees holding the new files; s2 still references them.
hdfs.delete(subsubDir,true);
hdfs.delete(subDir2,true);
checkQuotaUsageComputation(dir,14,BLOCKSIZE * 2 * REPLICATION);
// Deleting s2 combines its diff into s1; everything created after s1 must
// be destroyed, dropping disk usage to zero.
hdfs.deleteSnapshot(dir,"s2");
checkQuotaUsageComputation(dir,8,0);
// subdir1 remains visible through s1, and s2 no longer resolves.
Path subdir1_s1=SnapshotTestHelper.getSnapshotPath(dir,"s1",subDir1.getName());
Path subdir1_s2=SnapshotTestHelper.getSnapshotPath(dir,"s2",subDir1.getName());
assertTrue(hdfs.exists(subdir1_s1));
assertFalse(hdfs.exists(subdir1_s2));
}
InternalCallVerifier BooleanVerifier
/**
 * Running -deleteSnapshot with the wrong number of arguments must fail with
 * exit code -1 and print an "Incorrect number of arguments" message.
 */
@Test public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  // Remember the real streams so they can be restored even if an
  // assertion fails; otherwise the redirection leaks into later tests.
  final PrintStream oldOut = System.out;
  final PrintStream oldErr = System.err;
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell = new FsShell();
    shell.setConf(conf);
    // Too few arguments (snapshot name missing).
    String[] argv1 = {"-deleteSnapshot", "/tmp"};
    int val = shell.run(argv1);
    assertEquals(-1, val);
    assertTrue(out.toString().contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();
    // Too many arguments.
    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
    val = shell.run(argv2);
    assertEquals(-1, val);
    assertTrue(out.toString().contains(argv2[0] + ": Incorrect number of arguments."));
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test deleting a directory which is a descendant of a snapshottable
 * directory. In the test we need to cover the following cases:
 *
 * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
 * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
 * on ancestor(s).
 * 3. Delete current INodeFileWithSnapshot.
 * 4. Delete current INodeDirectoryWithSnapshot.
 *
 */
@Test(timeout=300000) public void testDeleteCurrentFileDirectory() throws Exception {
Path deleteDir=new Path(subsub,"deleteDir");
Path deleteFile=new Path(deleteDir,"deleteFile");
Path noChangeDirParent=new Path(sub,"noChangeDirParent");
Path noChangeDir=new Path(noChangeDirParent,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
DFSTestUtil.createFile(hdfs,deleteFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile1=new Path(subsub,"metaChangeFile1");
DFSTestUtil.createFile(hdfs,metaChangeFile1,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile2=new Path(noChangeDir,"metaChangeFile2");
DFSTestUtil.createFile(hdfs,metaChangeFile2,BLOCKSIZE,REPLICATION,seed);
// Case 1: delete a dir/file before any snapshot exists.
hdfs.delete(deleteDir,true);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
// A dir/file created and deleted between snapshots leaves no trace; its
// blocks must be removed from the blocksMap.
Path tempDir=new Path(dir,"tempdir");
Path tempFile=new Path(tempDir,"tempfile");
DFSTestUtil.createFile(hdfs,tempFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile temp=TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=temp.getBlocks();
hdfs.delete(tempDir,true);
checkQuotaUsageComputation(dir,9L,BLOCKSIZE * REPLICATION * 3);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// After s0: add a new file and change replication on two files, then s1.
Path newFileAfterS0=new Path(subsub,"newFile");
DFSTestUtil.createFile(hdfs,newFileAfterS0,BLOCKSIZE,REPLICATION,seed);
hdfs.setReplication(metaChangeFile1,REPLICATION_1);
hdfs.setReplication(metaChangeFile2,REPLICATION_1);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
checkQuotaUsageComputation(dir,14L,BLOCKSIZE * REPLICATION * 4);
Snapshot snapshot0=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
Snapshot snapshot1=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
// Case 2: delete a subtree while snapshots exist on an ancestor.
hdfs.delete(noChangeDirParent,true);
checkQuotaUsageComputation(dir,17L,BLOCKSIZE * REPLICATION * 4);
// The deleted dir is still reachable through s1 as a plain INodeDirectory.
Path snapshotNoChangeDir=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName() + "/" + noChangeDirParent.getName()+ "/"+ noChangeDir.getName());
INodeDirectory snapshotNode=(INodeDirectory)fsdir.getINode(snapshotNoChangeDir.toString());
assertEquals(INodeDirectory.class,snapshotNode.getClass());
ReadOnlyList children=snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
INode noChangeFileSCopy=children.get(1);
assertEquals(noChangeFile.getName(),noChangeFileSCopy.getLocalName());
assertEquals(INodeFile.class,noChangeFileSCopy.getClass());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,noChangeFileSCopy.getLocalName()).toString(),1,fsdir,blockmanager);
// metaChangeFile2's replication was changed between s0 and s1, so its
// snapshot copy reports a per-snapshot replication value.
INodeFile metaChangeFile2SCopy=children.get(0).asFile();
assertEquals(metaChangeFile2.getName(),metaChangeFile2SCopy.getLocalName());
assertTrue(metaChangeFile2SCopy.isWithSnapshot());
assertFalse(metaChangeFile2SCopy.isUnderConstruction());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,metaChangeFile2SCopy.getLocalName()).toString(),1,fsdir,blockmanager);
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// Cases 3/4: delete sub, which holds snapshot-tracked nodes plus a file
// (newFile) created after the last snapshot.
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile newFileNode=TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(),1,fsdir,blockmanager);
blocks=newFileNode.getBlocks();
checkQuotaUsageComputation(dir,18L,BLOCKSIZE * REPLICATION * 5);
hdfs.delete(sub,true);
// newFile is in no snapshot, so its blocks are freed immediately.
checkQuotaUsageComputation(dir,19L,BLOCKSIZE * REPLICATION * 4);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// Verify the structure of sub as preserved in s1.
Path snapshotSub=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName());
INodeDirectory snapshotNode4Sub=fsdir.getINode(snapshotSub.toString()).asDirectory();
assertTrue(snapshotNode4Sub.isWithSnapshot());
assertEquals(1,snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
assertEquals(2,snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
INode snapshotNode4Subsub=snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
// subsub's child list differs between the current state and snapshot s0.
INodeDirectory snapshotSubsubDir=(INodeDirectory)snapshotNode4Subsub;
children=snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
assertEquals(children.get(0).getLocalName(),metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(),newFileAfterS0.getName());
children=snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1,children.size());
INode child=children.get(0);
assertEquals(child.getLocalName(),metaChangeFile1.getName());
// metaChangeFile1's snapshot copy also keeps per-snapshot replication.
INodeFile metaChangeFile1SCopy=child.asFile();
assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that the global limit on snapshots is honored.
 */
@Test(timeout=10000) public void testSnapshotLimits() throws Exception {
  // Mock the directory and FSDirectory so only SnapshotManager's snapshot-ID
  // accounting is exercised.
  INodeDirectory ids = mock(INodeDirectory.class);
  FSDirectory fsdir = mock(FSDirectory.class);
  SnapshotManager sm = spy(new SnapshotManager(fsdir));
  doReturn(ids).when(sm).getSnapshottableRoot(anyString());
  doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
  // Exhaust the snapshot ID space. Use a primitive loop index to avoid
  // boxing an Integer on every iteration; String.valueOf(i) yields the
  // same names as Integer.toString() did.
  for (int i = 0; i < testMaxSnapshotLimit; i++) {
    sm.createSnapshot("dummy", String.valueOf(i));
  }
  // The next creation must fail because the ID space is exhausted.
  try {
    sm.createSnapshot("dummy", "shouldFailSnapshot");
    Assert.fail("Expected SnapshotException not thrown");
  } catch (SnapshotException se) {
    Assert.assertTrue(se.getMessage().toLowerCase().contains("rollover"));
  }
  // Snapshot IDs are not reused: even after deleting a snapshot, creating a
  // new one must still fail.
  sm.deleteSnapshot("", "", mock(INode.BlocksMapUpdateInfo.class), new ArrayList());
  try {
    sm.createSnapshot("dummy", "shouldFailSnapshot2");
    Assert.fail("Expected SnapshotException not thrown");
  } catch (SnapshotException se) {
    Assert.assertTrue(se.getMessage().toLowerCase().contains("rollover"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test FileStatus of snapshot file before/after rename
 */
@Test(timeout=60000) public void testSnapshotRename() throws Exception {
  DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
  // Take snapshot s1 and locate file1 inside it.
  Path s1Root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s1");
  Path fileInS1=new Path(s1Root,file1.getName());
  assertTrue(hdfs.exists(fileInS1));
  FileStatus statusInS1=hdfs.getFileStatus(fileInS1);
  // Rename the snapshot; the path under the old snapshot name must vanish.
  hdfs.renameSnapshot(sub1,"s1","s2");
  assertFalse(hdfs.exists(fileInS1));
  Path s2Root=SnapshotTestHelper.getSnapshotRoot(sub1,"s2");
  Path fileInS2=new Path(s2Root,file1.getName());
  assertTrue(hdfs.exists(fileInS2));
  FileStatus statusInS2=hdfs.getFileStatus(fileInS2);
  // The two statuses differ only in their path component.
  assertFalse(statusInS1.equals(statusInS2));
  statusInS1.setPath(statusInS2.getPath());
  assertEquals(statusInS1.toString(),statusInS2.toString());
}
InternalCallVerifier BooleanVerifier
@Test public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  // Remember the real streams so they can be restored; the original version
  // leaked the redirection into every subsequent test in the JVM.
  final PrintStream oldOut=System.out;
  final PrintStream oldErr=System.err;
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  PrintStream psOut=new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell=new FsShell();
    shell.setConf(conf);
    // Too few arguments: -renameSnapshot needs <dir> <oldName> <newName>.
    String[] argv1={"-renameSnapshot","/tmp","s1"};
    assertEquals(-1,shell.run(argv1));
    assertTrue(out.toString().contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();
    // Too many arguments.
    String[] argv2={"-renameSnapshot","/tmp","s1","s2","s3"};
    assertEquals(-1,shell.run(argv2));
    assertTrue(out.toString().contains(argv2[0] + ": Incorrect number of arguments."));
  }
  finally {
    // Restore the real streams so later tests are unaffected.
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
    out.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  String pathName="/snapshot";
  Path snapshottablePath=new Path(pathName);
  try {
    cluster=new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    SnapshotManager snapshotManager=cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs=(DistributedFileSystem)cluster.getFileSystem();
    // Create one snapshottable directory holding one snapshot.
    dfs.mkdirs(snapshottablePath);
    dfs.allowSnapshot(snapshottablePath);
    dfs.createSnapshot(snapshottablePath);
    // Read the SnapshotInfo MXBean via JMX and compare it with the
    // SnapshotManager's own counts.
    MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
    CompositeData[] directories=(CompositeData[])mbs.getAttribute(mxbeanName,"SnapshottableDirectories");
    assertEquals(snapshotManager.getNumSnapshottableDirs(),directories.length);
    CompositeData[] snapshots=(CompositeData[])mbs.getAttribute(mxbeanName,"Snapshots");
    assertEquals(snapshotManager.getNumSnapshots(),snapshots.length);
    // Both entries must refer to the directory created above.
    assertTrue(((String)directories[0].get("path")).contains(pathName));
    assertTrue(((String)snapshots[0].get("snapshotDirectory")).contains(pathName));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testThreadSafety() throws Exception {
  // Hammer the shared StartupProgress from many threads.  Threads are split
  // across two phases and two steps; afterwards the view must show the
  // per-phase file/size/total values and the accumulated counters, proving
  // no internal corruption occurred.
  int numThreads=100;
  Phase[] phases={LOADING_FSIMAGE,LOADING_FSIMAGE,LOADING_EDITS,LOADING_EDITS};
  Step[] steps=new Step[]{new Step(INODES),new Step(DELEGATION_KEYS),new Step(INODES),new Step(DELEGATION_KEYS)};
  String[] files={"file1","file1","file2","file2"};
  long[] sizes={1000L,1000L,2000L,2000L};
  long[] totals={10000L,20000L,30000L,40000L};
  ExecutorService exec=Executors.newFixedThreadPool(numThreads);
  try {
    for (int i=0; i < numThreads; ++i) {
      final Phase phase=phases[i % phases.length];
      final Step step=steps[i % steps.length];
      final String file=files[i % files.length];
      final long size=sizes[i % sizes.length];
      final long total=totals[i % totals.length];
      // Generic Callable<Void> (not the raw type) so the compiler checks
      // the call() signature.
      exec.submit(new Callable<Void>(){
        @Override public Void call(){
          startupProgress.beginPhase(phase);
          startupProgress.setFile(phase,file);
          startupProgress.setSize(phase,size);
          startupProgress.setTotal(phase,step,total);
          incrementCounter(startupProgress,phase,step,100L);
          startupProgress.endStep(phase,step);
          startupProgress.endPhase(phase);
          return null;
        }
      }
      );
    }
  }
  finally {
    exec.shutdown();
    assertTrue(exec.awaitTermination(10000L,TimeUnit.MILLISECONDS));
  }
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  // 25 threads hit each (phase, step) pair, each adding 100 -> 2500.
  assertEquals("file1",view.getFile(LOADING_FSIMAGE));
  assertEquals(1000L,view.getSize(LOADING_FSIMAGE));
  assertEquals(10000L,view.getTotal(LOADING_FSIMAGE,new Step(INODES)));
  assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(INODES)));
  assertEquals(20000L,view.getTotal(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
  assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
  assertEquals("file2",view.getFile(LOADING_EDITS));
  assertEquals(2000L,view.getSize(LOADING_EDITS));
  assertEquals(30000L,view.getTotal(LOADING_EDITS,new Step(INODES)));
  assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(INODES)));
  assertEquals(40000L,view.getTotal(LOADING_EDITS,new Step(DELEGATION_KEYS)));
  assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(DELEGATION_KEYS)));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testElapsedTime() throws Exception {
  // Run LOADING_FSIMAGE to completion with two timed steps.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Thread.sleep(50L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  Thread.sleep(50L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // Leave LOADING_EDITS and its step running.
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile=new Step("file",1000L);
  startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L);
  incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L);
  Thread.sleep(50L);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  // Everything that has begun reports positive elapsed time ...
  assertTrue(view.getElapsedTime() > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS,loadingEditsFile) > 0);
  // ... while a phase that never started reports exactly zero.
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT) == 0);
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT,new Step(INODES)) == 0);
  // Snapshot all elapsed times, wait, then re-query the same view.
  long totalTime=view.getElapsedTime();
  long loadingFsImageTime=view.getElapsedTime(LOADING_FSIMAGE);
  long loadingFsImageInodesTime=view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes);
  // Fixed copy-paste bug: this previously re-read the INODES step, leaving
  // the DELEGATION_KEYS elapsed time captured but never checked.
  long loadingFsImageDelegationKeysTime=view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  long loadingEditsTime=view.getElapsedTime(LOADING_EDITS);
  long loadingEditsFileTime=view.getElapsedTime(LOADING_EDITS,loadingEditsFile);
  Thread.sleep(50L);
  // Overall elapsed time keeps growing while startup is incomplete.
  assertTrue(totalTime < view.getElapsedTime());
  // Completed phases/steps are frozen at their final elapsed time.
  assertEquals(loadingFsImageTime,view.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(loadingFsImageInodesTime,view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes));
  assertEquals(loadingFsImageDelegationKeysTime,view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys));
  // Running phases/steps keep accumulating time.
  assertTrue(loadingEditsTime < view.getElapsedTime(LOADING_EDITS));
  assertTrue(loadingEditsFileTime < view.getElapsedTime(LOADING_EDITS,loadingEditsFile));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// After every phase reports COMPLETE, StartupProgress freezes itself:
// subsequent begin/set/increment calls must be silently ignored and views
// must keep returning the values captured at completion time.
@Test(timeout=10000) public void testFrozenAfterStartupCompletes(){
// Run LOADING_FSIMAGE through one full begin/step/end cycle.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE,"file1");
startupProgress.setSize(LOADING_FSIMAGE,1000L);
Step step=new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE,step);
startupProgress.setTotal(LOADING_FSIMAGE,step,10000L);
incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
startupProgress.endStep(LOADING_FSIMAGE,step);
startupProgress.endPhase(LOADING_FSIMAGE);
// Drive every remaining phase to COMPLETE so startup counts as finished.
for ( Phase phase : EnumSet.allOf(Phase.class)) {
if (startupProgress.getStatus(phase) != Status.COMPLETE) {
startupProgress.beginPhase(phase);
startupProgress.endPhase(phase);
}
}
// Snapshot the state at completion time.
StartupProgressView before=startupProgress.createView();
// Attempt a series of updates after completion; all must be ignored.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE,"file2");
startupProgress.setSize(LOADING_FSIMAGE,2000L);
startupProgress.beginStep(LOADING_FSIMAGE,step);
startupProgress.setTotal(LOADING_FSIMAGE,step,20000L);
incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
startupProgress.endStep(LOADING_FSIMAGE,step);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
Step newStep=new Step("file1");
startupProgress.beginStep(LOADING_EDITS,newStep);
incrementCounter(startupProgress,LOADING_EDITS,newStep,100L);
startupProgress.endStep(LOADING_EDITS,newStep);
startupProgress.endPhase(LOADING_EDITS);
StartupProgressView after=startupProgress.createView();
// Everything visible after the extra updates must match the frozen view.
assertEquals(before.getCount(LOADING_FSIMAGE),after.getCount(LOADING_FSIMAGE));
assertEquals(before.getCount(LOADING_FSIMAGE,step),after.getCount(LOADING_FSIMAGE,step));
assertEquals(before.getElapsedTime(),after.getElapsedTime());
assertEquals(before.getElapsedTime(LOADING_FSIMAGE),after.getElapsedTime(LOADING_FSIMAGE));
assertEquals(before.getElapsedTime(LOADING_FSIMAGE,step),after.getElapsedTime(LOADING_FSIMAGE,step));
assertEquals(before.getFile(LOADING_FSIMAGE),after.getFile(LOADING_FSIMAGE));
assertEquals(before.getSize(LOADING_FSIMAGE),after.getSize(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE),after.getTotal(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE,step),after.getTotal(LOADING_FSIMAGE,step));
// The step begun after freezing must not even appear in the view.
assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
}
APIUtilityVerifier BooleanVerifier
@Test public void testRunningState(){
  // Metrics for a startup that is still in progress: the first two phases
  // have run, the last two have not started yet.
  setStartupProgressForRunningState(startupProgress);
  MetricsRecordBuilder builder=getMetrics(metrics,true);
  assertTrue(getLongCounter("ElapsedTime",builder) >= 0L);
  assertGauge("PercentComplete",0.375f,builder);
  // Per-phase expectations, checked in the metrics' phase order.
  String[] prefixes={"LoadingFsImage","LoadingEdits","SavingCheckpoint","SafeMode"};
  long[] expectedCounts={100L,100L,0L,0L};
  long[] expectedTotals={100L,200L,0L,0L};
  float[] expectedPercents={1.0f,0.5f,0.0f,0.0f};
  boolean[] phaseStarted={true,true,false,false};
  for (int i=0; i < prefixes.length; i++) {
    assertCounter(prefixes[i] + "Count",expectedCounts[i],builder);
    if (phaseStarted[i]) {
      // A started phase has accumulated some (non-negative) elapsed time.
      assertTrue(getLongCounter(prefixes[i] + "ElapsedTime",builder) >= 0L);
    }
    else {
      // An unstarted phase reports exactly zero elapsed time.
      assertCounter(prefixes[i] + "ElapsedTime",0L,builder);
    }
    assertCounter(prefixes[i] + "Total",expectedTotals[i],builder);
    assertGauge(prefixes[i] + "PercentComplete",expectedPercents[i],builder);
  }
}
APIUtilityVerifier BooleanVerifier
@Test public void testFinalState(){
  // Metrics after startup has fully completed: every phase ran to 100%.
  setStartupProgressForFinalState(startupProgress);
  MetricsRecordBuilder builder=getMetrics(metrics,true);
  assertTrue(getLongCounter("ElapsedTime",builder) >= 0L);
  assertGauge("PercentComplete",1.0f,builder);
  // Per-phase expectations, checked in the metrics' phase order; every
  // phase finished, so count equals total and percent is 1.0.
  String[] prefixes={"LoadingFsImage","LoadingEdits","SavingCheckpoint","SafeMode"};
  long[] expectedCounts={100L,200L,300L,400L};
  for (int i=0; i < prefixes.length; i++) {
    assertCounter(prefixes[i] + "Count",expectedCounts[i],builder);
    assertTrue(getLongCounter(prefixes[i] + "ElapsedTime",builder) >= 0L);
    assertCounter(prefixes[i] + "Total",expectedCounts[i],builder);
    assertGauge(prefixes[i] + "PercentComplete",1.0f,builder);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=60000) public void testEviction() throws Exception {
  // Cache holds at most 2 evictable replicas; we create 3 so the first one
  // gets evicted once all of them are unreferenced.
  final ShortCircuitCache cache=new ShortCircuitCache(2,10000000,1,10000000,1,10000,0);
  final TestFileDescriptorPair pairs[]=new TestFileDescriptorPair[]{new TestFileDescriptorPair(),new TestFileDescriptorPair(),new TestFileDescriptorPair()};
  ShortCircuitReplicaInfo replicaInfos[]=new ShortCircuitReplicaInfo[]{null,null,null};
  for (int i=0; i < pairs.length; i++) {
    replicaInfos[i]=cache.fetchOrCreate(new ExtendedBlockId(i,"test_bp1"),new SimpleReplicaCreator(i,cache,pairs[i]));
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),replicaInfos[i].getReplica().getMetaStream());
  }
  // Drop all references so the replicas become evictable.
  for (int i=0; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  // Replicas 1 and 2 must still be served from the cache: the creator is a
  // sentinel that fails the test if it is ever invoked.
  for (int i=1; i < pairs.length; i++) {
    // Plain final int (the deprecated new Integer(i) ctor boxed needlessly);
    // string concatenation yields the same message text.
    final int iVal=i;
    replicaInfos[i]=cache.fetchOrCreate(new ExtendedBlockId(i,"test_bp1"),new ShortCircuitReplicaCreator(){
      @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
        Assert.fail("expected to use existing entry for " + iVal);
        return null;
      }
    }
    );
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),replicaInfos[i].getReplica().getMetaStream());
  }
  // Replica 0 was evicted, so the creator must be invoked for it.
  final MutableBoolean calledCreate=new MutableBoolean(false);
  replicaInfos[0]=cache.fetchOrCreate(new ExtendedBlockId(0,"test_bp1"),new ShortCircuitReplicaCreator(){
    @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
      calledCreate.setValue(true);
      return null;
    }
  }
  );
  Preconditions.checkState(replicaInfos[0].getReplica() == null);
  Assert.assertTrue(calledCreate.isTrue());
  // Clean up the references taken by the second fetch round.
  for (int i=1; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  for (int i=0; i < pairs.length; i++) {
    pairs[i].close();
  }
  cache.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=60000) public void testShmBasedStaleness() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration conf=createShortCircuitConf("testShmBasedStaleness",sockDir);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs=cluster.getFileSystem();
  final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
  String TEST_FILE="/test_file";
  final int TEST_FILE_LEN=8193;
  final int SEED=0xFADED;
  DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
  // Read one byte so a short-circuit replica is cached for the first block.
  FSDataInputStream fis=fs.open(new Path(TEST_FILE));
  int first=fis.read();
  final ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,new Path(TEST_FILE));
  Assert.assertTrue(first != -1);
  // While the datanode is alive, the replica's shm slot is valid.
  cache.accept(new CacheVisitor(){
    @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
      // Cast needed: the visitor parameters are declared raw here, so
      // Map.get returns Object.
      ShortCircuitReplica replica=(ShortCircuitReplica)replicas.get(ExtendedBlockId.fromExtendedBlock(block));
      Assert.assertNotNull(replica);
      Assert.assertTrue(replica.getSlot().isValid());
    }
  }
  );
  // Killing the datanode disconnects the shared memory segment, which must
  // mark the slot (and thus the cached replica) stale.
  cluster.getDataNodes().get(0).shutdown();
  cache.accept(new CacheVisitor(){
    @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
      ShortCircuitReplica replica=(ShortCircuitReplica)replicas.get(ExtendedBlockId.fromExtendedBlock(block));
      Assert.assertNotNull(replica);
      Assert.assertFalse(replica.getSlot().isValid());
    }
  }
  );
  // Release resources that the original version leaked.
  fis.close();
  cluster.shutdown();
  sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test unlinking a file whose blocks we are caching in the DFSClient.
 * The DataNode will notify the DFSClient that the replica is stale via the
 * ShortCircuitShm.
 */
@Test(timeout=60000) public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration conf=createShortCircuitConf("testUnlinkingReplicasInFileDescriptorCache",sockDir);
  // Set the FD cache expiry very high so replicas are only purged by the
  // staleness notification, never by timeout.
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,1000000000L);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs=cluster.getFileSystem();
  final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
  // No shared memory segments exist before any reads.
  cache.getDfsClientShmManager().visit(new Visitor(){
    @Override public void visit( HashMap info) throws IOException {
      Assert.assertEquals(0,info.size());
    }
  }
  );
  final Path TEST_PATH=new Path("/test_file");
  final int TEST_FILE_LEN=8193;
  final int SEED=0xFADE0;
  DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LEN,(short)1,SEED);
  byte contents[]=DFSTestUtil.readFileBuffer(fs,TEST_PATH);
  byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents,expected));
  final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
  // After the read there is exactly one connected, not-yet-full segment.
  cache.getDfsClientShmManager().visit(new Visitor(){
    @Override public void visit( HashMap info) throws IOException {
      // Cast needed: the visitor parameter is declared raw here, so
      // HashMap.get returns Object.
      PerDatanodeVisitorInfo vinfo=(PerDatanodeVisitorInfo)info.get(datanode);
      Assert.assertTrue(vinfo.full.isEmpty());
      Assert.assertFalse(vinfo.disabled);
      Assert.assertEquals(1,vinfo.notFull.values().size());
      DfsClientShm shm=vinfo.notFull.values().iterator().next();
      Assert.assertFalse(shm.isDisconnected());
    }
  }
  );
  // Delete the file; the DataNode should invalidate the client's slots.
  fs.delete(TEST_PATH,false);
  GenericTestUtils.waitFor(new Supplier(){
    MutableBoolean done=new MutableBoolean(true);
    @Override public Boolean get(){
      try {
        done.setValue(true);
        cache.getDfsClientShmManager().visit(new Visitor(){
          @Override public void visit( HashMap info) throws IOException {
            PerDatanodeVisitorInfo vinfo=(PerDatanodeVisitorInfo)info.get(datanode);
            Assert.assertTrue(vinfo.full.isEmpty());
            Assert.assertFalse(vinfo.disabled);
            Assert.assertEquals(1,vinfo.notFull.values().size());
            DfsClientShm shm=vinfo.notFull.values().iterator().next();
            // Wait until no slot in the segment is still valid.
            for (Iterator<Slot> iter=shm.slotIterator(); iter.hasNext(); ) {
              Slot slot=iter.next();
              if (slot.isValid()) {
                done.setValue(false);
              }
            }
          }
        }
        );
      }
      catch ( IOException e) {
        LOG.error("error running visitor",e);
      }
      return done.booleanValue();
    }
  }
  ,10,60000);
  cluster.shutdown();
  sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAllocShm() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration conf=createShortCircuitConf("testAllocShm",sockDir);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs=cluster.getFileSystem();
  final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
  // Initially there are no shared memory segments for any datanode.
  cache.getDfsClientShmManager().visit(new Visitor(){
    @Override public void visit( HashMap info) throws IOException {
      Assert.assertEquals(0,info.size());
    }
  }
  );
  DomainPeer peer=getDomainPeerToDn(conf);
  MutableBoolean usedPeer=new MutableBoolean(false);
  ExtendedBlockId blockId=new ExtendedBlockId(123,"xyz");
  final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
  // Allocating the first slot must create a new segment over the peer.
  Slot slot=cache.allocShmSlot(datanode,peer,usedPeer,blockId,"testAllocShm_client");
  Assert.assertNotNull(slot);
  Assert.assertTrue(usedPeer.booleanValue());
  cache.getDfsClientShmManager().visit(new Visitor(){
    @Override public void visit( HashMap info) throws IOException {
      // One datanode entry with a single, not-yet-full segment.
      Assert.assertEquals(1,info.size());
      // Cast needed: the visitor parameter is declared raw here, so
      // HashMap.get returns Object.
      PerDatanodeVisitorInfo vinfo=(PerDatanodeVisitorInfo)info.get(datanode);
      Assert.assertFalse(vinfo.disabled);
      Assert.assertEquals(0,vinfo.full.size());
      Assert.assertEquals(1,vinfo.notFull.size());
    }
  }
  );
  // Releasing the only slot should eventually free the whole segment.
  cache.scheduleSlotReleaser(slot);
  GenericTestUtils.waitFor(new Supplier(){
    @Override public Boolean get(){
      final MutableBoolean done=new MutableBoolean(false);
      try {
        cache.getDfsClientShmManager().visit(new Visitor(){
          @Override public void visit( HashMap info) throws IOException {
            PerDatanodeVisitorInfo vinfo=(PerDatanodeVisitorInfo)info.get(datanode);
            done.setValue(vinfo.full.isEmpty() && vinfo.notFull.isEmpty());
          }
        }
        );
      }
      catch ( IOException e) {
        LOG.error("error running visitor",e);
      }
      return done.booleanValue();
    }
  }
  ,10,60000);
  cluster.shutdown();
  sockDir.close();
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=10000) public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf=new Configuration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  cluster.waitActive();
  FileSystem fs=cluster.getFileSystem();
  try {
    // Write a small file and look up the location of its first block.
    DFSTestUtil.createFile(fs,new Path("/tmp/x"),16,(short)1,23);
    LocatedBlocks locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations("/tmp/x",0,16);
    ExtendedBlock firstBlock=new ExtendedBlock(locatedBlocks.get(0).getBlock());
    Token blockToken=locatedBlocks.get(0).getBlockToken();
    final DatanodeInfo datanode=locatedBlocks.get(0).getLocations()[0];
    ClientDatanodeProtocol datanodeProxy=DFSUtil.createClientDatanodeProtocolProxy(datanode,conf,60000,false);
    try {
      // The deprecated RPC is access-restricted; this caller must be
      // rejected with an IOException.
      datanodeProxy.getBlockLocalPathInfo(firstBlock,blockToken);
      Assert.fail("The call should have failed as this user " + " is not allowed to call getBlockLocalPathInfo");
    }
    catch ( IOException ex) {
      Assert.assertTrue(ex.getMessage().contains("not allowed to call getBlockLocalPathInfo"));
    }
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testSkipWithVerifyChecksum() throws IOException {
  // Exercise seek/skip on a short-circuit local read with checksum
  // verification enabled; the reads must complete without checksum errors.
  int size=blockSize;
  Configuration conf=new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,"/tmp/testSkipWithVerifyChecksum._PORT");
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FileSystem fs=cluster.getFileSystem();
  try {
    Path path=new Path("/");
    // Redundant "== true" comparison removed.
    assertTrue("/ should be a directory",fs.getFileStatus(path).isDirectory());
    // Create a three-block file.
    byte[] fileData=AppendTestUtil.randomBytes(seed,size * 3);
    Path file1=new Path("filelocal.dat");
    FSDataOutputStream stm=createFile(fs,file1,1);
    stm.write(fileData);
    stm.close();
    // Read a few bytes, seek past two blocks, then read again; with
    // checksums on, any corruption would surface as an exception here.
    FSDataInputStream instm=fs.open(file1);
    byte[] actual=new byte[fileData.length];
    int nread=instm.read(actual,0,3);
    long skipped=2 * size + 3;
    instm.seek(skipped);
    nread=instm.read(actual,(int)(skipped + nread),3);
    instm.close();
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAllocateSlots() throws Exception {
  File path=new File(TEST_BASE,"testAllocateSlots");
  path.mkdirs();
  SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("shm_",new String[]{path.getAbsolutePath()});
  FileInputStream stream=factory.createDescriptor("testAllocateSlots",4096);
  ShortCircuitShm shm=new ShortCircuitShm(ShmId.createRandom(),stream);
  int numSlots=0;
  // Generics restored: the raw ArrayList/Iterator made the Slot-typed
  // for-each and next() assignments below uncompilable.
  ArrayList<Slot> slots=new ArrayList<Slot>();
  // Allocate slots until the segment is full.
  while (!shm.isFull()) {
    Slot slot=shm.allocAndRegisterSlot(new ExtendedBlockId(123L,"test_bp1"));
    slots.add(slot);
    numSlots++;
  }
  LOG.info("allocated " + numSlots + " slots before running out.");
  // The shm's own iterator must report exactly the slots we allocated.
  int slotIdx=0;
  for (Iterator<Slot> iter=shm.slotIterator(); iter.hasNext(); ) {
    Assert.assertTrue(slots.contains(iter.next()));
  }
  // Slots start non-anchorable, with sequential indices.
  for ( Slot slot : slots) {
    Assert.assertFalse(slot.addAnchor());
    Assert.assertEquals(slotIdx++,slot.getSlotIdx());
  }
  // Once made anchorable, anchoring succeeds.
  for ( Slot slot : slots) {
    slot.makeAnchorable();
  }
  for ( Slot slot : slots) {
    Assert.assertTrue(slot.addAnchor());
  }
  for ( Slot slot : slots) {
    slot.removeAnchor();
  }
  // Unregister and invalidate everything, then free the segment.
  for ( Slot slot : slots) {
    shm.unregisterSlot(slot.getSlotIdx());
    slot.makeInvalid();
  }
  shm.free();
  stream.close();
  FileUtil.fullyDelete(path);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
// Report a healthy standby so any refusal is due to auto-HA policy alone.
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf=getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,getFencerTrueCommand());
tool.setConf(conf);
// Without -forcemanual both transitions are refused and the RPCs are
// never invoked on the protocol mock.
assertEquals(-1,runTool("-transitionToActive","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
assertEquals(-1,runTool("-transitionToStandby","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
Mockito.verify(mockProtocol,Mockito.never()).transitionToActive(anyReqInfo());
Mockito.verify(mockProtocol,Mockito.never()).transitionToStandby(anyReqInfo());
// With -forcemanual (plus user confirmation on stdin) both transitions go
// through, each tagged as a forced-by-user request.
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToActive","-forcemanual","nn1"));
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToStandby","-forcemanual","nn1"));
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToActive(reqInfoCaptor.capture());
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToStandby(reqInfoCaptor.capture());
for ( StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
assertEquals(RequestSource.REQUEST_BY_USER_FORCED,ri.getSource());
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test case to check whether both the name node is active or not
 * @throws Exception
 */
@Test public void testTransitionToActiveWhenOtherNamenodeisActive() throws Exception {
  NameNode nn1=cluster.getNameNode(0);
  NameNode nn2=cluster.getNameNode(1);
  // Start from a clean slate: both namenodes in standby.
  ensureStandby(nn1,0);
  ensureStandby(nn2,1);
  assertTrue(nn1.isStandbyState());
  assertTrue(nn2.isStandbyState());
  // Without --forceactive the second transition must not yield two actives.
  runTool("-transitionToActive","nn1");
  runTool("-transitionToActive","nn2");
  assertFalse("Both namenodes cannot be active",nn1.isActiveState() && nn2.isActiveState());
  ensureStandby(nn1,0);
  ensureStandby(nn2,1);
  assertTrue(nn1.isStandbyState());
  assertTrue(nn2.isStandbyState());
  // Even with --forceactive there must never be two simultaneous actives.
  runTool("-transitionToActive","nn1");
  runTool("-transitionToActive","nn2","--forceactive");
  assertFalse("Both namenodes cannot be active even though with forceActive",nn1.isActiveState() && nn2.isActiveState());
  // With nn1 shut down, forcing nn2 active is legitimate.
  cluster.shutdownNameNode(0);
  ensureStandby(nn2,1);
  assertTrue(nn2.isStandbyState());
  assertFalse(cluster.isNameNodeUp(0));
  runTool("-transitionToActive","nn2","--forceactive");
  assertTrue("Namenode nn2 should be active",nn2.isActiveState());
}
/**
 * Transition the namenode at the given cluster index to standby, unless it
 * already reports the STANDBY state (or its state is unknown).  Extracted
 * from four identical inline conditionals in the test above.
 */
private void ensureStandby(NameNode nn,int index) throws Exception {
  if (nn.getState() != null && !nn.getState().equals(HAServiceState.STANDBY.name())) {
    cluster.transitionToStandby(index);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testStateTransition() throws Exception {
  // Cycle each namenode standby -> active -> standby via the admin tool,
  // in the same order as before: nn1 first, then nn2.
  for (int i=0; i < 2; i++) {
    NameNode nn=cluster.getNameNode(i);
    String nnId="nn" + (i + 1);
    assertTrue(nn.isStandbyState());
    assertEquals(0,runTool("-transitionToActive",nnId));
    assertFalse(nn.isStandbyState());
    assertEquals(0,runTool("-transitionToStandby",nnId));
    assertTrue(nn.isStandbyState());
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testTryFailoverToSafeMode() throws Exception {
  // Use a fencer that always succeeds so fencing cannot be the failure.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
  tool.setConf(conf);
  // Put the failover target into safemode; it must refuse to become active.
  NameNodeAdapter.enterSafeMode(cluster.getNameNode(0),false);
  int rc=runTool("-failover","nn2","nn1");
  assertEquals(-1,rc);
  String expectedMessage="is not ready to become active: " + "The NameNode is in safemode";
  assertTrue("Bad output: " + errOutput,errOutput.contains(expectedMessage));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test failover with various options
 */
@Test public void testFencer() throws Exception {
// Failover with no fencer configured must fail.
assertEquals(-1,runTool("-failover","nn1","nn2"));
// Set up a fencer that records its substituted arguments into a temp file,
// so we can verify when (and with what) the fence command actually ran.
File tmpFile=File.createTempFile("testFencer",".txt");
tmpFile.deleteOnExit();
if (Shell.WINDOWS) {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo %target_nameserviceid%.%target_namenodeid% " + "%target_port% %dfs_ha_namenode_id% > " + tmpFile.getAbsolutePath() + ")");
}
else {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo -n $target_nameserviceid.$target_namenodeid " + "$target_port $dfs_ha_namenode_id > " + tmpFile.getAbsolutePath() + ")");
}
tool.setConf(conf);
// Graceful failovers succeed without invoking the fencer.
assertEquals(0,runTool("-transitionToActive","nn1"));
assertEquals(0,runTool("-failover","nn1","nn2"));
assertEquals(0,runTool("-ns","minidfs-ns","-failover","nn2","nn1"));
assertEquals("",Files.toString(tmpFile,Charsets.UTF_8));
// --forcefence runs the fencer; its output names the fenced namenode.
assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
String fenceCommandOutput=Files.toString(tmpFile,Charsets.UTF_8).replaceAll(" *[\r\n]+","");
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",fenceCommandOutput);
tmpFile.delete();
// --forceactive alone does not fence.
assertEquals(0,runTool("-failover","nn2","nn1","--forceactive"));
assertFalse(tmpFile.exists());
// --forcefence with no fencer configured fails without fencing.
conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
assertFalse(tmpFile.exists());
// --forcefence with a malformed fencer configuration also fails.
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"foobar!");
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
assertFalse(tmpFile.exists());
// Force fence using the "true" fencer; flag order should not matter.
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0,runTool("-failover","--forcefence","nn1","nn2"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Call fetch token using http server
 */
@Test public void expectedTokenIsRetrievedFromHttp() throws Exception {
  // Build a token and have the mocked filesystem hand it out.
  final Token testToken=new Token("id".getBytes(),"pwd".getBytes(),FakeRenewer.KIND,new Text("127.0.0.1:1234"));
  WebHdfsFileSystem fs=mock(WebHdfsFileSystem.class);
  doReturn(testToken).when(fs).getDelegationToken(anyString());
  // Fetch the token into a credentials file and read it back.
  Path p=new Path(f.getRoot().getAbsolutePath(),tokenFile);
  DelegationTokenFetcher.saveDelegationToken(conf,fs,null,p);
  Credentials creds=Credentials.readTokenStorageFile(p,conf);
  // Generics restored: "Iterator>" / "Token>" in the original were
  // syntactically invalid (stripped type arguments).
  Iterator<Token<?>> itr=creds.getAllTokens().iterator();
  assertTrue("token not exist error",itr.hasNext());
  Token<?> fetchedToken=itr.next();
  Assert.assertArrayEquals("token wrong identifier error",testToken.getIdentifier(),fetchedToken.getIdentifier());
  Assert.assertArrayEquals("token wrong password error",testToken.getPassword(),fetchedToken.getPassword());
  // Renew and cancel must both be routed to the fake renewer.
  DelegationTokenFetcher.renewTokens(conf,p);
  Assert.assertEquals(testToken,FakeRenewer.getLastRenewed());
  DelegationTokenFetcher.cancelTokens(conf,p);
  Assert.assertEquals(testToken,FakeRenewer.getLastCanceled());
}
APIUtilityVerifier BooleanVerifier
/**
 * An unrecognized argument must make the tool print its usage text.
 */
@Test(timeout=10000) public void testInvalidArgument() throws Exception {
  HdfsConfiguration config = new HdfsConfiguration();
  String[] arguments = {"-invalidArgument"};
  String output = runTool(config, arguments, false);
  assertTrue(output.contains(GetConf.USAGE));
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testExtraArgsThrowsError() throws Exception {
  // A trailing unexpected argument after -namenodes must be reported.
  HdfsConfiguration config = new HdfsConfiguration();
  config.set("mykey", "myval");
  String[] arguments = {"-namenodes", "unexpected-arg"};
  String output = runTool(config, arguments, false);
  assertTrue(output.contains("Did not expect argument: unexpected-arg"));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},{@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
 * by setting each command's backing configuration key and checking the
 * tool echoes its value.
 */
@Test(timeout=10000) public void testTool() throws Exception {
  HdfsConfiguration config = new HdfsConfiguration(false);
  for (Command cmd : Command.values()) {
    CommandHandler handler = Command.getHandler(cmd.getName());
    // Only commands backed by a plain configuration key are covered here;
    // -confKey is exercised elsewhere.
    if (handler.key == null || "-confKey".equals(cmd.getName())) {
      continue;
    }
    config.set(handler.key, "value");
    String[] arguments = {cmd.getName()};
    assertTrue(runTool(config, arguments, true).contains("value"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Truncate an edit log mid-record and verify the viewer fails without
 * recovery mode but succeeds (and round-trips) with it.
 */
@Test public void testRecoveryMode() throws IOException {
  // Damage the edit log by chopping the last few bytes off.
  String edits=nnHelper.generateEdits();
  FileOutputStream os=new FileOutputStream(edits,true);
  try {
    FileChannel editsFile=os.getChannel();
    editsFile.truncate(editsFile.size() - 5);
  }
  finally {
    // Close before the viewer runs. The original kept the stream open for
    // the whole test and leaked it whenever an assertion failed.
    os.close();
  }
  String editsParsedXml=folder.newFile("editsRecoveredParsed.xml").getAbsolutePath();
  String editsReparsed=folder.newFile("editsRecoveredReparsed").getAbsolutePath();
  String editsParsedXml2=folder.newFile("editsRecoveredParsed2.xml").getAbsolutePath();
  // Without recovery the viewer must reject the truncated log...
  assertEquals(-1,runOev(edits,editsParsedXml,"xml",false));
  // ...and accept it with recovery enabled.
  assertEquals(0,runOev(edits,editsParsedXml,"xml",true));
  // Round-trip the recovered XML back to binary and to XML again.
  assertEquals(0,runOev(editsParsedXml,editsReparsed,"binary",false));
  assertEquals(0,runOev(editsReparsed,editsParsedXml2,"xml",false));
  assertTrue("Test round trip",filesEqualIgnoreTrailingZeros(editsParsedXml,editsParsedXml2));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the OfflineEditsViewer on a freshly generated edit log: a
 * binary -> XML -> binary round trip must reproduce the original.
 */
@Test public void testGenerated() throws IOException {
  String edits = nnHelper.generateEdits();
  LOG.info("Generated edits=" + edits);
  String parsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
  String reparsedBin = folder.newFile("editsParsed").getAbsolutePath();
  // Convert to XML and back to binary.
  assertEquals(0, runOev(edits, parsedXml, "xml", false));
  assertEquals(0, runOev(parsedXml, reparsedBin, "binary", false));
  assertTrue("Edits " + edits + " should have all op codes", hasAllOpCodes(edits));
  LOG.info("Comparing generated file " + reparsedBin + " with reference file " + edits);
  assertTrue("Generated edits and reparsed (bin to XML to bin) should be same",
      filesEqualIgnoreTrailingZeros(edits, reparsedBin));
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testStored() throws IOException {
  // Checked-in reference edit log plus its reference XML rendering.
  final String cacheDir = System.getProperty("test.cache.data", "build/test/cache");
  final String editsStored = cacheDir + "/editsStored";
  final String parsedXml = cacheDir + "/editsStoredParsed.xml";
  final String reparsedBin = cacheDir + "/editsStoredReparsed";
  final String referenceXml = cacheDir + "/editsStored.xml";
  // Round trip: binary -> XML -> binary.
  assertEquals(0, runOev(editsStored, parsedXml, "xml", false));
  assertEquals(0, runOev(parsedXml, reparsedBin, "binary", false));
  assertTrue("Edits " + editsStored + " should have all op codes", hasAllOpCodes(editsStored));
  assertTrue("Reference XML edits and parsed to XML should be same",
      FileUtils.contentEqualsIgnoreEOL(new File(referenceXml), new File(parsedXml), "UTF-8"));
  assertTrue("Reference edits and reparsed (bin to XML to bin) should be same",
      filesEqualIgnoreTrailingZeros(editsStored, reparsedBin));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Run FileDistributionCalculator over the fsimage and check the reported
 * totalFiles, totalDirectories and maxFileSize against the files written
 * during setup.
 */
@Test public void testFileDistributionCalculator() throws IOException {
  StringWriter output=new StringWriter();
  PrintWriter o=new PrintWriter(output);
  new FileDistributionCalculator(new Configuration(),0,0,o).visit(new RandomAccessFile(originalFsimage,"r"));
  o.close();
  Pattern p=Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher=p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles=Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR,totalFiles);
  p=Pattern.compile("totalDirectories = (\\d+)\n");
  matcher=p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs=Integer.parseInt(matcher.group(1));
  // NOTE(review): the +3 presumably covers the root and other auxiliary
  // directories created by the fixture — confirm against the image setup.
  assertEquals(NUM_DIRS + 3,totalDirs);
  // Fixed: the comparator was a raw Comparator with a typed compare()
  // method, which does not compile; also replaced the hand-rolled nested
  // ternary with Long.compare.
  FileStatus maxFile=Collections.max(writtenFiles.values(),new Comparator<FileStatus>(){
    @Override public int compare( FileStatus first, FileStatus second){
      return Long.compare(first.getLen(),second.getLen());
    }
  }
  );
  p=Pattern.compile("maxFileSize = (\\d+)\n");
  matcher=p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(),Long.parseLong(matcher.group(1)));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test case where the destination file already exists: the old (empty)
 * contents must remain visible until the atomic stream is closed, at
 * which point the new contents replace them.
 */
@Test public void testOverwriteFile() throws IOException {
assertTrue("Creating empty dst file",DST_FILE.createNewFile());
OutputStream fos=new AtomicFileOutputStream(DST_FILE);
assertTrue("Empty file still exists",DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
// The original (empty) contents are still in place before close().
assertEquals("",DFSTestUtil.readFile(DST_FILE));
fos.close();
// After close() the destination holds the newly written data.
String readBackData=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,readBackData);
}
TestInitializer BooleanVerifier HybridVerifier
@Before public void cleanupTestDir() throws IOException {
  // Ensure the test directory exists, then empty it for the next case.
  if (!TEST_DIR.exists()) {
    assertTrue(TEST_DIR.mkdirs());
  }
  FileUtil.fullyDeleteContents(TEST_DIR);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test case where there is no existing file: nothing becomes visible at
 * the destination path until the stream is closed.
 */
@Test public void testWriteNewFile() throws IOException {
OutputStream fos=new AtomicFileOutputStream(DST_FILE);
assertFalse(DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
// Even after write+flush the destination does not exist yet — the data is
// presumably staged in a temporary file until close (TODO confirm).
assertFalse(DST_FILE.exists());
fos.close();
// close() makes the destination appear with the full contents.
assertTrue(DST_FILE.exists());
String readBackData=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,readBackData);
}
BranchVerifier TestInitializer BooleanVerifier HybridVerifier
@Before public void cleanup(){
  // Drop any file left over from a previous test case, then make sure the
  // parent directory exists for the one about to run.
  if (FILE.exists()) {
    assertTrue(FILE.delete());
  }
  FILE.getParentFile().mkdirs();
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testTruncatedFileReturnsDefault() throws IOException {
  // A zero-length (truncated) backing file must yield the default value.
  assertTrue(FILE.createNewFile());
  assertEquals(0, FILE.length());
  BestEffortLongFile longFile = new BestEffortLongFile(FILE, 12345L);
  try {
    assertEquals(12345L, longFile.get());
  } finally {
    longFile.close();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetSet() throws IOException {
// Opening a non-existent file yields the supplied default.
BestEffortLongFile f=new BestEffortLongFile(FILE,12345L);
try {
assertEquals(12345L,f.get());
// The backing file exists on disk after the first access.
assertTrue(FILE.exists());
Random r=new Random();
for (int i=0; i < 100; i++) {
long newVal=r.nextLong();
f.set(newVal);
// The writer reads back its own value...
assertEquals(newVal,f.get());
// ...and an independent reader must observe the persisted value rather
// than its own default (999).
BestEffortLongFile f2=new BestEffortLongFile(FILE,999L);
try {
assertEquals(newVal,f2.get());
}
finally {
IOUtils.closeStream(f2);
}
}
}
finally {
IOUtils.closeStream(f);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testBasics(){
  final int N_ELEMS = 100000;
  ChunkedArrayList l = new ChunkedArrayList();
  assertTrue(l.isEmpty());
  for (int value = 0; value < N_ELEMS; value++) {
    l.add(value);
  }
  assertFalse(l.isEmpty());
  assertEquals(N_ELEMS, l.size());
  // 100k elements must have spilled across many chunks.
  assertTrue(l.getNumChunks() > 10);
  assertEquals(8192, l.getMaxChunkSize());
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testMark() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  // mark/reset is unsupported and the stream must say so.
  assertFalse(in.markSupported());
  try {
    in.mark(1);
    fail("Mark should not succeed");
  } catch (UnsupportedOperationException expected) {
    // expected
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollNMulti(){
  LOG.info("Test pollN multi");
  set.addAll(list);
  // pollN(0) is a no-op: nothing is removed.
  List polled = set.pollN(0);
  assertEquals(0, polled.size());
  for (Integer value : list) {
    assertTrue(set.contains(value));
  }
  // pollN(10) removes exactly the requested count.
  polled = set.pollN(10);
  assertEquals(10, polled.size());
  for (Integer value : polled) {
    assertTrue(list.contains(value));
    assertFalse(set.contains(value));
  }
  // Asking for more than remains drains the set.
  polled = set.pollN(1000);
  assertEquals(NUM - 10, polled.size());
  for (Integer value : polled) {
    assertTrue(list.contains(value));
  }
  assertTrue(set.isEmpty());
  assertEquals(0, set.size());
  LOG.info("Test pollN multi - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRemoveOne(){
  LOG.info("Test remove one");
  Integer first = list.get(0);
  assertTrue(set.add(first));
  assertEquals(1, set.size());
  // Removing the sole element empties the set.
  assertTrue(set.remove(first));
  assertEquals(0, set.size());
  assertFalse(set.iterator().hasNext());
  // The element can be re-added afterwards.
  assertTrue(set.add(first));
  assertEquals(1, set.size());
  assertTrue(set.iterator().hasNext());
  LOG.info("Test remove one - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testOther(){
LOG.info("Test other");
// addAll/removeAll over the full list leaves the set empty.
assertTrue(set.addAll(list));
assertTrue(set.removeAll(list));
assertTrue(set.isEmpty());
// Remove only the first 10 elements.
List sub=new LinkedList();
for (int i=0; i < 10; i++) {
sub.add(list.get(i));
}
assertTrue(set.addAll(list));
assertTrue(set.removeAll(sub));
assertFalse(set.isEmpty());
assertEquals(NUM - 10,set.size());
for ( Integer i : sub) {
assertFalse(set.contains(i));
}
assertFalse(set.containsAll(sub));
// The remaining NUM-10 elements are all still present.
List sub2=new LinkedList();
for (int i=10; i < NUM; i++) {
sub2.add(list.get(i));
}
assertTrue(set.containsAll(sub2));
// toArray(T[]) returns exactly the remaining elements without draining
// the set.
Integer[] array=set.toArray(new Integer[0]);
assertEquals(NUM - 10,array.length);
for (int i=0; i < array.length; i++) {
assertTrue(sub2.contains(array[i]));
}
assertEquals(NUM - 10,set.size());
// Untyped toArray() behaves the same.
Object[] array2=set.toArray();
assertEquals(NUM - 10,array2.length);
for (int i=0; i < array2.length; i++) {
assertTrue(sub2.contains(array2[i]));
}
LOG.info("Test other - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollNMultiArray(){
LOG.info("Test pollN multi array");
set.addAll(list);
// A 10-slot array polls exactly 10 elements.
Integer[] poll=new Integer[10];
poll=set.pollToArray(poll);
assertEquals(10,poll.length);
for ( Integer i : poll) {
assertTrue(list.contains(i));
assertFalse(set.contains(i));
}
// An oversized array drains the remaining NUM-10 elements.
poll=new Integer[NUM];
poll=set.pollToArray(poll);
assertEquals(NUM - 10,poll.length);
for (int i=0; i < NUM - 10; i++) {
assertTrue(list.contains(poll[i]));
}
assertTrue(set.isEmpty());
assertEquals(0,set.size());
// Refill and drain everything in a single call.
set.addAll(list);
poll=new Integer[NUM];
poll=set.pollToArray(poll);
assertTrue(set.isEmpty());
assertEquals(0,set.size());
assertEquals(NUM,poll.length);
for (int i=0; i < NUM; i++) {
assertTrue(list.contains(poll[i]));
}
// A zero-length array polls nothing and leaves the set untouched.
set.addAll(list);
poll=new Integer[0];
poll=set.pollToArray(poll);
for (int i=0; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
assertEquals(0,poll.length);
LOG.info("Test pollN multi array- DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollAll(){
  LOG.info("Test poll all");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  // pollAll() drains every element in one call.
  List drained = set.pollAll();
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  for (int idx = 0; idx < NUM; idx++) {
    assertFalse(set.contains(list.get(idx)));
  }
  // Everything drained came from the original list.
  for (Integer value : drained) {
    assertTrue(list.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  LOG.info("Test poll all - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testEmptyBasic(){
  LOG.info("Test empty basic");
  // A fresh set has no elements and an exhausted iterator.
  assertFalse(set.iterator().hasNext());
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  LOG.info("Test empty - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
@Test public void testRemoveMulti(){
  LOG.info("Test remove multi");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  final int half = NUM / 2;
  // Remove the first half; it must disappear while the rest remains.
  for (int idx = 0; idx < half; idx++) {
    assertTrue(set.remove(list.get(idx)));
  }
  for (int idx = 0; idx < half; idx++) {
    assertFalse(set.contains(list.get(idx)));
  }
  for (int idx = half; idx < NUM; idx++) {
    assertTrue(set.contains(list.get(idx)));
  }
  LOG.info("Test remove multi - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  Integer only = list.get(0);
  set.add(only);
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  // The iterator yields exactly that one element.
  Iterator it = set.iterator();
  assertTrue(it.hasNext());
  assertEquals(only, it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
@Test public void testRemoveAll(){
  LOG.info("Test remove all");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  // Remove every element one by one, then verify total emptiness.
  for (int idx = 0; idx < NUM; idx++) {
    assertTrue(set.remove(list.get(idx)));
  }
  for (int idx = 0; idx < NUM; idx++) {
    assertFalse(set.contains(list.get(idx)));
  }
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  LOG.info("Test remove all - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testClear(){
  LOG.info("Test clear");
  set.addAll(list);
  assertEquals(NUM, set.size());
  assertFalse(set.isEmpty());
  // clear() empties the set completely.
  set.clear();
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  assertFalse(set.iterator().hasNext());
  LOG.info("Test clear - DONE");
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testMultiBasic(){
LOG.info("Test multi element basic");
// Every element of the list is accepted exactly once.
for ( Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(),set.size());
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Duplicate adds are rejected without disturbing membership.
for ( Integer i : list) {
assertFalse(set.add(i));
}
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// The iterator yields each element exactly once; iteration order is not
// asserted in this variant.
Iterator iter=set.iterator();
int num=0;
while (iter.hasNext()) {
Integer next=iter.next();
assertNotNull(next);
assertTrue(list.contains(next));
num++;
}
assertEquals(list.size(),num);
LOG.info("Test multi element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollMulti(){
LOG.info("Test poll multi");
for ( Integer i : list) {
assertTrue(set.add(i));
}
// pollFirst() returns elements in insertion order.
for (int i=0; i < NUM / 2; i++) {
assertEquals(list.get(i),set.pollFirst());
}
assertEquals(NUM / 2,set.size());
for (int i=0; i < NUM / 2; i++) {
assertFalse(set.contains(list.get(i)));
}
for (int i=NUM / 2; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
// Remaining elements iterate in their original order.
Iterator iter=set.iterator();
int num=NUM / 2;
while (iter.hasNext()) {
assertEquals(list.get(num++),iter.next());
}
// NOTE(review): arguments are (actual, expected); JUnit convention is
// (expected, actual).
assertEquals(num,NUM);
// Re-adding the polled half appends it after the surviving half...
for (int i=0; i < NUM / 2; i++) {
assertTrue(set.add(list.get(i)));
}
assertEquals(NUM,set.size());
// ...so polling now yields the second half first, then the first half.
for (int i=NUM / 2; i < NUM; i++) {
assertEquals(list.get(i),set.pollFirst());
}
for (int i=0; i < NUM / 2; i++) {
assertEquals(list.get(i),set.pollFirst());
}
assertEquals(0,set.size());
assertTrue(set.isEmpty());
LOG.info("Test poll multi - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testBookmarkSetToHeadOnAddToEmpty(){
  LOG.info("Test bookmark is set after adding to previously empty set.");
  // Bookmark of an empty set is exhausted.
  Iterator it=set.getBookmark();
  assertFalse(it.hasNext());
  set.add(list.get(0));
  set.add(list.get(1));
  // After the first adds, the bookmark must start at the head.
  it=set.getBookmark();
  assertTrue(it.hasNext());
  // Fixed: JUnit's assertEquals takes (expected, actual); the arguments
  // were reversed, producing misleading failure messages.
  assertEquals(list.get(0),it.next());
  assertEquals(list.get(1),it.next());
  assertFalse(it.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testGetBookmarkReturnsBookmarkIterator(){
  LOG.info("Test getBookmark returns proper iterator");
  assertTrue(set.addAll(list));
  Iterator bookmark=set.getBookmark();
  // Fixed: assertEquals takes (expected, actual) — expected value first.
  assertEquals(list.get(0),bookmark.next());
  final int numAdvance=list.size() / 2;
  for (int i=1; i < numAdvance; i++) {
    bookmark.next();
  }
  // A second getBookmark() call resumes where the first stopped.
  Iterator bookmark2=set.getBookmark();
  assertEquals(list.get(numAdvance),bookmark2.next());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRemoveMulti(){
  LOG.info("Test remove multi");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  // Remove the first half; it must be gone while the second half remains.
  for (int i=0; i < NUM / 2; i++) {
    assertTrue(set.remove(list.get(i)));
  }
  for (int i=0; i < NUM / 2; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for (int i=NUM / 2; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  // Surviving elements iterate in their original insertion order.
  Iterator iter=set.iterator();
  int num=NUM / 2;
  while (iter.hasNext()) {
    assertEquals(list.get(num++),iter.next());
  }
  // Fixed: assertEquals takes (expected, actual); NUM is the expected value.
  assertEquals(NUM,num);
  LOG.info("Test remove multi - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testEmptyBasic(){
  LOG.info("Test empty basic");
  // An empty set has nothing to iterate or poll, in any flavor.
  assertFalse(set.iterator().hasNext());
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  assertNull(set.pollFirst());
  assertEquals(0, set.pollAll().size());
  assertEquals(0, set.pollN(10).size());
  LOG.info("Test empty - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMultiBasic(){
  LOG.info("Test multi element basic");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  assertEquals(list.size(), set.size());
  for (Integer value : list) {
    assertTrue(set.contains(value));
  }
  // Duplicate adds are rejected but do not disturb membership.
  for (Integer value : list) {
    assertFalse(set.add(value));
  }
  for (Integer value : list) {
    assertTrue(set.contains(value));
  }
  // Iteration follows insertion order.
  Iterator it = set.iterator();
  int count = 0;
  while (it.hasNext()) {
    assertEquals(list.get(count++), it.next());
  }
  assertEquals(list.size(), count);
  LOG.info("Test multi element basic - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testBookmarkAdvancesOnRemoveOfSameElement(){
  LOG.info("Test that the bookmark advances if we remove its element.");
  assertTrue(set.add(list.get(0)));
  assertTrue(set.add(list.get(1)));
  assertTrue(set.add(list.get(2)));
  Iterator it=set.getBookmark();
  // Fixed: assertEquals takes (expected, actual) — expected value first.
  assertEquals(list.get(0),it.next());
  // The bookmark now rests on element 1; removing it must advance the
  // bookmark past it to element 2.
  set.remove(list.get(1));
  it=set.getBookmark();
  assertEquals(list.get(2),it.next());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollAll(){
  LOG.info("Test poll all");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  // Drain the set one element at a time via pollFirst().
  while (set.pollFirst() != null) {
  }
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  for (int idx = 0; idx < NUM; idx++) {
    assertFalse(set.contains(list.get(idx)));
  }
  assertFalse(set.iterator().hasNext());
  LOG.info("Test poll all - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testRemoveOne(){
  LOG.info("Test remove one");
  Integer only = list.get(0);
  assertTrue(set.add(only));
  assertEquals(1, set.size());
  assertTrue(set.remove(only));
  assertEquals(0, set.size());
  // The emptied set behaves like a fresh one for every poll flavor.
  assertFalse(set.iterator().hasNext());
  assertNull(set.pollFirst());
  assertEquals(0, set.pollAll().size());
  assertEquals(0, set.pollN(10).size());
  // Re-adding works after removal.
  assertTrue(set.add(only));
  assertEquals(1, set.size());
  assertTrue(set.iterator().hasNext());
  LOG.info("Test remove one - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testClear(){
LOG.info("Test clear");
set.addAll(list);
assertEquals(NUM,set.size());
assertFalse(set.isEmpty());
// Advance the bookmark past the midpoint so clear() must reset it too.
Iterator bkmrkIt=set.getBookmark();
for (int i=0; i < set.size() / 2 + 1; i++) {
bkmrkIt.next();
}
assertTrue(bkmrkIt.hasNext());
set.clear();
assertEquals(0,set.size());
assertTrue(set.isEmpty());
// After clear() the bookmark is reset to an exhausted state.
bkmrkIt=set.getBookmark();
assertFalse(bkmrkIt.hasNext());
// All poll flavors see an empty set.
assertEquals(0,set.pollAll().size());
assertEquals(0,set.pollN(10).size());
assertNull(set.pollFirst());
Iterator iter=set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test clear - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
@Test public void testRemoveAll(){
  LOG.info("Test remove all");
  for (Integer value : list) {
    assertTrue(set.add(value));
  }
  // Remove every element in list order, then verify the set is empty.
  for (Integer value : list) {
    assertTrue(set.remove(value));
  }
  for (Integer value : list) {
    assertFalse(set.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  LOG.info("Test remove all - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testOther(){
  LOG.info("Test other");
  assertTrue(set.addAll(list));
  // toArray(T[]) must return every element without draining the set.
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(list.contains(array[i]));
  }
  assertEquals(NUM,set.size());
  // Untyped toArray() behaves identically.
  Object[] array2=set.toArray();
  assertEquals(NUM,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(list.contains(array2[i]));
  }
  // Fixed: the completion message said "Test capacity - DONE" although this
  // is the "Test other" case (copy/paste slip).
  LOG.info("Test other - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  Integer head = list.get(0);
  set.add(head);
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  // Exactly one element comes back from the iterator.
  Iterator it = set.iterator();
  assertTrue(it.hasNext());
  assertEquals(head, it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPollNMulti(){
  LOG.info("Test pollN multi");
  set.addAll(list);
  // The first pollN(10) returns the first ten elements in order.
  List polled = set.pollN(10);
  assertEquals(10, polled.size());
  for (int idx = 0; idx < 10; idx++) {
    assertEquals(list.get(idx), polled.get(idx));
  }
  // An oversized request drains whatever remains, still in order.
  polled = set.pollN(1000);
  assertEquals(NUM - 10, polled.size());
  for (int idx = 10; idx < NUM; idx++) {
    assertEquals(list.get(idx), polled.get(idx - 10));
  }
  assertTrue(set.isEmpty());
  assertEquals(0, set.size());
  LOG.info("Test pollN multi - DONE");
}
TestInitializer BooleanVerifier HybridVerifier
@Before public void setup() throws IOException {
  // Start from an empty directory and seed TEST_FILE with the fixture data.
  FileUtil.fullyDelete(TEST_DIR);
  assertTrue(TEST_DIR.mkdirs());
  FileOutputStream fos=new FileOutputStream(TEST_FILE);
  try {
    fos.write(TEST_DATA);
  }
  finally {
    // The original leaked the descriptor when write() threw.
    fos.close();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testByteRange() throws IOException {
ByteRangeInputStream.URLOpener oMock=getMockURLOpener(new URL("http://test"));
ByteRangeInputStream.URLOpener rMock=getMockURLOpener(null);
ByteRangeInputStream bris=new ByteRangeInputStreamImpl(oMock,rMock);
// Initial seek/read opens the original URL at offset 0.
bris.seek(0);
assertEquals("getPos wrong",0,bris.getPos());
bris.read();
assertEquals("Initial call made incorrectly (offset check)",0,bris.startPos);
assertEquals("getPos should return 1 after reading one byte",1,bris.getPos());
verify(oMock,times(1)).connect(0,false);
// A sequential read reuses the open connection: connect count stays at 1.
bris.read();
assertEquals("getPos should return 2 after reading two bytes",2,bris.getPos());
verify(oMock,times(1)).connect(0,false);
// After a seek, the resolved URL opener is used with an offset connect.
rMock.setURL(new URL("http://resolvedurl/"));
bris.seek(100);
bris.read();
assertEquals("Seek to 100 bytes made incorrectly (offset Check)",100,bris.startPos);
assertEquals("getPos should return 101 after reading one byte",101,bris.getPos());
verify(rMock,times(1)).connect(100,true);
// Seeking to the current position must not trigger a new connection.
bris.seek(101);
bris.read();
verify(rMock,times(1)).connect(100,true);
verify(rMock,times(0)).connect(101,true);
bris.seek(2500);
bris.read();
assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",2500,bris.startPos);
// A response without a Content-Length header must be rejected.
doReturn(getMockConnection(null)).when(rMock).connect(anyLong(),anyBoolean());
bris.seek(500);
try {
bris.read();
fail("Exception should be thrown when content-length is not given");
}
catch ( IOException e) {
assertTrue("Incorrect response message: " + e.getMessage(),e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH + " is missing: "));
}
bris.close();
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPropagatedClose() throws IOException {
ByteRangeInputStream bris=mock(ByteRangeInputStream.class,CALLS_REAL_METHODS);
InputStream mockStream=mock(InputStream.class);
doReturn(mockStream).when(bris).openInputStream();
Whitebox.setInternalState(bris,"status",ByteRangeInputStream.StreamStatus.SEEK);
// Running totals of expected open/close invocations; advanced with ++ at
// the verification sites below.
int brisOpens=0;
int brisCloses=0;
int isCloses=0;
// In SEEK status the first getInputStream() opens the underlying stream.
bris.getInputStream();
verify(bris,times(++brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(isCloses)).close();
// A second call reuses it: no new open, no close.
bris.getInputStream();
verify(bris,times(brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(isCloses)).close();
// seek() to a new position invalidates the stream: the next
// getInputStream() closes the old underlying stream and opens a new one.
bris.seek(1);
bris.getInputStream();
verify(bris,times(++brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(++isCloses)).close();
bris.getInputStream();
verify(bris,times(brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(isCloses)).close();
// Seeking to the current position is a no-op.
bris.seek(1);
bris.getInputStream();
verify(bris,times(brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(isCloses)).close();
// close() propagates to the underlying stream.
bris.close();
verify(bris,times(++brisCloses)).close();
verify(mockStream,times(++isCloses)).close();
// A second close() does not re-close the underlying stream.
bris.close();
verify(bris,times(++brisCloses)).close();
verify(mockStream,times(isCloses)).close();
// Reading after close must fail without reopening anything.
boolean errored=false;
try {
bris.getInputStream();
}
catch ( IOException e) {
errored=true;
assertEquals("Stream closed",e.getMessage());
}
finally {
assertTrue("Read a closed steam",errored);
}
verify(bris,times(brisOpens)).openInputStream();
verify(bris,times(brisCloses)).close();
verify(mockStream,times(isCloses)).close();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testConcat() throws Exception {
  // Three 1 KB source files plus a 1 KB target.
  Path[] srcs = {new Path("/test/hadoop/file1"), new Path("/test/hadoop/file2"), new Path("/test/hadoop/file3")};
  for (Path src : srcs) {
    DFSTestUtil.createFile(fSys, src, 1024, (short)3, 0);
  }
  Path target = new Path("/test/hadoop/catFile");
  DFSTestUtil.createFile(fSys, target, 1024, (short)3, 0);
  Assert.assertTrue(exists(fSys, target));
  // concat consumes the sources and appends them to the target.
  fSys.concat(target, srcs);
  for (Path src : srcs) {
    Assert.assertFalse(exists(fSys, src));
  }
  FileStatus status = fSys.getFileStatus(target);
  Assert.assertEquals(1024 * 4, status.getLen());
}
UtilityVerifier BooleanVerifier HybridVerifier
// mkdirs below an existing file must fail (shallow and deep paths alike).
@Override @Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fSys,"test/hadoop");
Assert.assertFalse(exists(fSys,testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys,testDir));
createFile(getTestRootPath(fSys,"test/hadoop/file"));
// mkdirs directly under an existing file must fail.
Path testSubDir=getTestRootPath(fSys,"test/hadoop/file/subdir");
try {
fSys.mkdirs(testSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected
}
try {
Assert.assertFalse(exists(fSys,testSubDir));
}
catch ( AccessControlException e) {
// Deliberately ignored: presumably some target file systems deny the
// existence probe under a file instead of answering it — confirm.
}
// The same must hold for a deeper path below the file.
Path testDeepSubDir=getTestRootPath(fSys,"test/hadoop/file/deep/sub/dir");
try {
fSys.mkdirs(testDeepSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected
}
try {
Assert.assertFalse(exists(fSys,testDeepSubDir));
}
catch ( AccessControlException e) {
// Deliberately ignored, as above.
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSWebHdfsFileSystem() throws Exception {
  // Write a single byte over swebhdfs and read it back.
  FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
  final Path testPath = new Path("/testswebhdfs");
  FSDataOutputStream out = fs.create(testPath);
  out.write(23);
  out.close();
  Assert.assertTrue(fs.exists(testPath));
  InputStream in = fs.open(testPath);
  Assert.assertEquals(23, in.read());
  in.close();
  fs.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Verify that the token aspect replaces a delegation token whose renewal
 * fails: the renew action becomes invalid and re-initialization installs
 * the next token from the filesystem.
 */
@Test public void testRenewal() throws Exception {
  Configuration conf=new Configuration();
  // Fixed: the generic wildcards (Token<?>, RenewAction<?>) had been
  // stripped, which does not compile.
  Token<?> token1=mock(Token.class);
  Token<?> token2=mock(Token.class);
  final long renewCycle=100;
  DelegationTokenRenewer.renewCycle=renewCycle;
  UserGroupInformation ugi=UserGroupInformation.createUserForTesting("foo",new String[]{"bar"});
  DummyFs fs=spy(new DummyFs());
  // First fetch returns token1; after it goes bad, the next fetch returns token2.
  doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
  doReturn(token1).when(fs).getRenewToken();
  // Renewal of token1 fails, forcing the aspect to replace the token.
  doThrow(new IOException("renew failed")).when(token1).renew(conf);
  doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,null);
  final URI uri=new URI("dummyfs://127.0.0.1:1234");
  TokenAspect<DummyFs> tokenAspect=new TokenAspect<DummyFs>(fs,SecurityUtil.buildTokenService(uri),DummyFs.TOKEN_KIND);
  fs.initialize(uri,conf);
  tokenAspect.initDelegationToken(ugi);
  tokenAspect.ensureTokenInitialized();
  DelegationTokenRenewer.RenewAction<?> action=getActionFromTokenAspect(tokenAspect);
  verify(fs).setDelegationToken(token1);
  assertTrue(action.isValid());
  // After two renew cycles the failed renewal invalidates the action.
  Thread.sleep(renewCycle * 2);
  assertSame(action,getActionFromTokenAspect(tokenAspect));
  assertFalse(action.isValid());
  // Re-initialization fetches and installs the replacement token.
  tokenAspect.ensureTokenInitialized();
  verify(fs,times(2)).getDelegationToken(anyString());
  verify(fs).setDelegationToken(token2);
  assertNotSame(action,getActionFromTokenAspect(tokenAspect));
  action=getActionFromTokenAspect(tokenAspect);
  assertTrue(action.isValid());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test snapshot deletion through WebHdfs: snapshots created via WebHdfs
 * (both explicitly named and default-named) must disappear after
 * deleteSnapshot.
 */
@Test public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster cluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path foo=new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
// One snapshot with an explicit name, one with a generated name (null).
webHdfs.createSnapshot(foo,"s1");
final Path spath=webHdfs.createSnapshot(foo,null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path=SnapshotTestHelper.getSnapshotRoot(foo,"s1");
Assert.assertTrue(webHdfs.exists(s1path));
// Deleting each snapshot removes its snapshot root.
webHdfs.deleteSnapshot(foo,"s1");
Assert.assertFalse(webHdfs.exists(s1path));
webHdfs.deleteSnapshot(foo,spath.getName());
Assert.assertFalse(webHdfs.exists(spath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
BooleanVerifier
/**
 * WebHdfs should be enabled by default after HDFS-5532
 * @throws Exception
 */
@Test public void testWebHdfsEnabledByDefault() throws Exception {
  Configuration defaultConf = new HdfsConfiguration();
  // Pass false as the fallback so the assertion only holds if the key's
  // default is genuinely true.
  boolean enabled = defaultConf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false);
  Assert.assertTrue(enabled);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Listing a directory whose child count exceeds {@code dfs.ls.limit} must
 * still return every child: WebHdfs has to page through partial listings.
 */
@Test(timeout=300000) public void testLargeDirectory() throws Exception {
  final Configuration conf=WebHdfsTestUtil.createConf();
  final int listLimit=2;
  // Force paging: the server returns at most listLimit entries per batch.
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,listLimit);
  FsPermission.setUMask(conf,new FsPermission((short)0077));
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
    UserGroupInformation.setLoginUser(UserGroupInformation.createUserForTesting("not-superuser",new String[]{"not-supergroup"}));
    // Fix: use the parameterized PrivilegedExceptionAction<Void> instead of
    // the raw type, which only compiled through erasure with unchecked
    // warnings on doAs().
    UserGroupInformation.createUserForTesting("me",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction<Void>(){
      @Override public Void run() throws IOException, URISyntaxException {
        FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
        Path d=new Path("/my-dir");
        Assert.assertTrue(fs.mkdirs(d));
        // Create three pages worth of children.
        for (int i=0; i < listLimit * 3; i++) {
          Path p=new Path(d,"file-" + i);
          Assert.assertTrue(fs.createNewFile(p));
        }
        // A single listStatus must surface all of them despite paging.
        Assert.assertEquals(listLimit * 3,fs.listStatus(d).length);
        return null;
      }
    }
    );
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A purely numeric user name ("123") must be accepted once the WebHdfs user
 * pattern is widened to allow leading digits.
 */
@Test(timeout=300000) public void testNumericalUserName() throws Exception {
  final Configuration conf=WebHdfsTestUtil.createConf();
  // Widened pattern: permits a digit as the first character.
  conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,"^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
    // Fix: parameterized PrivilegedExceptionAction<Void> instead of the raw
    // type, which only compiled through erasure with unchecked warnings.
    UserGroupInformation.createUserForTesting("123",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction<Void>(){
      @Override public Void run() throws IOException, URISyntaxException {
        FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
        Path d=new Path("/my-dir");
        Assert.assertTrue(fs.mkdirs(d));
        return null;
      }
    }
    );
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test snapshot creation through WebHdfs
 */
@Test public void testWebHdfsCreateSnapshot() throws Exception {
  final Configuration conf=WebHdfsTestUtil.createConf();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs=cluster.getFileSystem();
    final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
    final Path snapshotDir=new Path("/foo");
    dfs.mkdirs(snapshotDir);
    // Snapshot creation before allowSnapshot() must be rejected.
    try {
      webHdfs.createSnapshot(snapshotDir);
      fail("Cannot create snapshot on a non-snapshottable directory");
    }
    catch ( Exception e) {
      GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory",e);
    }
    dfs.allowSnapshot(snapshotDir);
    // Both an explicitly named and a default-named snapshot should appear.
    webHdfs.createSnapshot(snapshotDir,"s1");
    final Path defaultSnapshot=webHdfs.createSnapshot(snapshotDir,null);
    Assert.assertTrue(webHdfs.exists(defaultSnapshot));
    final Path namedSnapshot=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s1");
    Assert.assertTrue(webHdfs.exists(namedSnapshot));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test snapshot rename through WebHdfs
 */
@Test public void testWebHdfsRenameSnapshot() throws Exception {
  final Configuration conf=WebHdfsTestUtil.createConf();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs=cluster.getFileSystem();
    final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
    final Path snapshotDir=new Path("/foo");
    dfs.mkdirs(snapshotDir);
    dfs.allowSnapshot(snapshotDir);
    webHdfs.createSnapshot(snapshotDir,"s1");
    final Path oldSnapshot=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s1");
    Assert.assertTrue(webHdfs.exists(oldSnapshot));
    // Renaming must move the snapshot root from s1 to s2.
    webHdfs.renameSnapshot(snapshotDir,"s1","s2");
    Assert.assertFalse(webHdfs.exists(oldSnapshot));
    final Path newSnapshot=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s2");
    Assert.assertTrue(webHdfs.exists(newSnapshot));
    // Deleting under the new name removes it entirely.
    webHdfs.deleteSnapshot(snapshotDir,"s2");
    Assert.assertFalse(webHdfs.exists(newSnapshot));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Sanity check that a WebHdfs client bound to the logical HA URI keeps
 * working across a failover from namenode 0 to namenode 1.
 */
@Test public void testHA() throws IOException {
  final Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster=null;
  FileSystem webFs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster,conf,LOGICAL_NAME);
    cluster.waitActive();
    webFs=FileSystem.get(WEBHDFS_URI,conf);
    cluster.transitionToActive(0);
    // Operation against the first active namenode succeeds.
    Assert.assertTrue(webFs.mkdirs(new Path("/test")));
    // Kill NN0 and fail over; the client must follow to NN1.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    Assert.assertTrue(webFs.mkdirs(new Path("/test2")));
  }
  finally {
    IOUtils.cleanup(null,webFs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
BooleanVerifier
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test(timeout=120000) public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster=null;
  // Fix: parameterize the map. With the raw type, resultMap.get("mkdirs")
  // yields an Object, so the assertTrue() below does not compile.
  final Map<String, Boolean> resultMap=new HashMap<String, Boolean>();
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster,conf,LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);
    final NameNode namenode=cluster.getNameNode(0);
    final NamenodeProtocols rpcServer=namenode.getRpcServer();
    // Simulate a NameNode still starting up: RPC server not yet installed.
    Whitebox.setInternalState(namenode,"rpcServer",null);
    new Thread(){
      @Override public void run(){
        boolean result=false;
        FileSystem fs=null;
        try {
          fs=FileSystem.get(WEBHDFS_URI,conf);
          final Path dir=new Path("/test");
          // Expected to retry until the RPC server is restored below.
          result=fs.mkdirs(dir);
        }
        catch ( IOException e) {
          result=false;
        }
        finally {
          IOUtils.cleanup(null,fs);
        }
        // Publish the outcome and wake the waiting test thread.
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs",result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }
    .start();
    // Give the client thread time to hit the startup retry path, then
    // restore the RPC server so the retried mkdirs can succeed.
    Thread.sleep(1000);
    Whitebox.setInternalState(namenode,"rpcServer",rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
BooleanVerifier
/**
 * Every WebHdfs POST operation must report that it needs no authentication.
 */
@Test(timeout=1000) public void testPostOpRequireAuth(){
  for ( HttpOpParam.Op postOp : PostOpParam.Op.values()) {
    assertFalse(postOp.getRequireAuth());
  }
}
BooleanVerifier
/**
 * Every WebHdfs DELETE operation must report that it needs no authentication.
 */
@Test(timeout=1000) public void testDeleteOpRequireAuth(){
  for ( HttpOpParam.Op deleteOp : DeleteOpParam.Op.values()) {
    assertFalse(deleteOp.getRequireAuth());
  }
}
IterativeVerifier BooleanVerifier
/**
 * The recording filter installed via FILTER_INITIALIZER_PROPERTY must see
 * every request path exactly once, regardless of the path's shape.
 */
@Test public void testServletFilter() throws Exception {
  Configuration conf=new Configuration();
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,RecordingFilter.Initializer.class.getName());
  HttpServer2 server=createTestServer(conf);
  server.start();
  // A mix of servlet, JSP, static and log paths the filter should observe.
  final String[] urls={"/fsck","/stacks","/a.jsp","/listPaths","/data","/streamFile","/","/*","/static/a.out","/logs/a.log"};
  final String prefix="http://" + NetUtils.getHostPortString(server.getConnectorAddress(0));
  try {
    for ( String url : urls) {
      access(prefix + url);
    }
  }
  finally {
    server.stop();
  }
  LOG.info("RECORDS = " + RECORDS);
  // Every accessed path must have been recorded exactly once — and nothing else.
  for ( String url : urls) {
    assertTrue(RECORDS.remove(url));
  }
  assertTrue(RECORDS.isEmpty());
}
BooleanVerifier
// NOTE(review): this method appears truncated by extraction — the second
// assertTrue's string literal (probably containing '<' or '&') and the rest
// of the body are missing. Restore from the original source before use.
@Test public void testNeedsQuoting() throws Exception {
assertTrue(HtmlQuoting.needsQuoting("abcde>"));
assertTrue(HtmlQuoting.needsQuoting("
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Over HTTPS the authentication cookie must be marked both HttpOnly and
 * Secure, and carry the expected token value.
 */
@Test public void testHttpsCookie() throws IOException, GeneralSecurityException {
  URL base=new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(1)));
  HttpsURLConnection conn=(HttpsURLConnection)new URL(base,"/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
  String header=conn.getHeaderField("Set-Cookie");
  // Fix: parameterize the list. With the raw List, cookies.get(0) is an
  // Object and the getSecure()/getValue() calls below do not compile.
  List<HttpCookie> cookies=HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Over plain HTTP the authentication cookie must be HttpOnly and carry the
 * expected token value (no Secure flag is asserted here).
 */
@Test public void testHttpCookie() throws IOException {
  URL base=new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  HttpURLConnection conn=(HttpURLConnection)new URL(base,"/echo").openConnection();
  String header=conn.getHeaderField("Set-Cookie");
  // Fix: parameterize the list. With the raw List, cookies.get(0) is an
  // Object and the getValue() call below does not compile.
  List<HttpCookie> cookies=HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
InternalCallVerifier BooleanVerifier
/**
 * RequestQuoter must pass through non-null parameter values unchanged.
 */
@Test public void testRequestQuoterWithNotNull() throws Exception {
  HttpServletRequest mockRequest=Mockito.mock(HttpServletRequest.class);
  String[] expected=new String[]{"abc","def"};
  Mockito.doReturn(expected).when(mockRequest).getParameterValues("dummy");
  RequestQuoter quoter=new RequestQuoter(mockRequest);
  String[] actual=quoter.getParameterValues("dummy");
  Assert.assertTrue("It should return Parameter Values",Arrays.equals(expected,actual));
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises HttpServer2.hasAdministratorAccess() across four setups:
 * authorization off, authorization on with no remote user, a remote user
 * rejected by the ACL, and a remote user accepted by the ACL.
 */
@Test public void testHasAdministratorAccess() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,false);
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteUser()).thenReturn(null);
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
// Authorization disabled: everybody is an administrator.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
response=Mockito.mock(HttpServletResponse.class);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
// Authorization enabled but no authenticated user: denied with 403.
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context,request,response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN),Mockito.anyString());
response=Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getRemoteUser()).thenReturn("foo");
// Authenticated user and no admin ACL configured: allowed.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
response=Mockito.mock(HttpServletResponse.class);
AccessControlList acls=Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// ACL rejects the user: denied with 403.
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context,request,response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN),Mockito.anyString());
response=Mockito.mock(HttpServletResponse.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// ACL accepts the user: allowed.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
}
InternalCallVerifier BooleanVerifier
/**
 * Instrumentation endpoints are open by default, but once both the
 * requires-admin flag and authorization are on, a user rejected by the admin
 * ACL must be denied.
 */
@Test public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf=new Configuration();
  ServletContext ctx=Mockito.mock(ServletContext.class);
  Mockito.when(ctx.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest req=Mockito.mock(HttpServletRequest.class);
  HttpServletResponse resp=Mockito.mock(HttpServletResponse.class);
  // Default configuration: anybody may read instrumentation.
  Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(ctx,req,resp));
  // Turn on admin-only instrumentation plus authorization, with an ACL that
  // rejects every user: access must now be refused.
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
  AccessControlList denyAll=Mockito.mock(AccessControlList.class);
  Mockito.when(denyAll.isUserAllowed(Mockito.any())).thenReturn(false);
  Mockito.when(ctx.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(denyAll);
  Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(ctx,req,resp));
}
BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test the maximum number of threads cannot be exceeded.
 */
// NOTE(review): assertion failures inside the Runnable are swallowed by the
// empty catch below, and the method returns right after start.countDown()
// without awaiting the workers — so a violated thread bound may go
// unreported. Confirm against the original harness before relying on this.
@Test public void testMaxThreads() throws Exception {
int clientThreads=MAX_THREADS * 10;
Executor executor=Executors.newFixedThreadPool(clientThreads);
// ready: counted down once per client; start: single gate that releases all
// clients at once so they hammer the server concurrently.
final CountDownLatch ready=new CountDownLatch(clientThreads);
final CountDownLatch start=new CountDownLatch(1);
for (int i=0; i < clientThreads; i++) {
executor.execute(new Runnable(){
@Override public void run(){
ready.countDown();
try {
start.await();
assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echo?a=b&c=d")));
// The server-side pool must never exceed the configured maximum.
int serverThreads=server.webServer.getThreadPool().getThreads();
assertTrue("More threads are started than expected, Server Threads count: " + serverThreads,serverThreads <= MAX_THREADS);
System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = "+ MAX_THREADS);
}
catch ( Exception e) {
}
}
}
);
}
ready.await();
start.countDown();
}
IterativeVerifier BooleanVerifier
/**
 * A filter registered with explicit path specs must fire for paths under
 * those specs and stay silent for everything else.
 */
@Test public void testPathSpecFilters() throws Exception {
  Configuration conf=new Configuration();
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,RecordingFilter.Initializer.class.getName());
  String[] pathSpecs={"/path","/path/*"};
  HttpServer2 server=createTestServer(conf,pathSpecs);
  server.start();
  // Paths covered by the specs above ...
  final String[] filteredUrls={"/path","/path/","/path/nodes","/path/nodes/","/path/nodes/foo/job"};
  // ... and paths outside them, which the filter must not record.
  final String[] notFilteredUrls={"/","/*"};
  final String prefix="http://" + NetUtils.getHostPortString(server.getConnectorAddress(0));
  try {
    for ( String url : filteredUrls) {
      access(prefix + url);
    }
    for ( String url : notFilteredUrls) {
      access(prefix + url);
    }
  }
  finally {
    server.stop();
  }
  LOG.info("RECORDS = " + RECORDS);
  // Only the filtered paths were recorded — each exactly once.
  for ( String url : filteredUrls) {
    assertTrue(RECORDS.remove(url));
  }
  assertTrue(RECORDS.isEmpty());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * A filter whose init() throws must abort server startup with the standard
 * "handlers failed" IOException.
 */
@Test public void testServletFilterWhenInitThrowsException() throws Exception {
  Configuration conf=new Configuration();
  // Install a filter that always fails during initialization.
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,ErrorFilter.Initializer.class.getName());
  HttpServer2 server=createTestServer(conf);
  try {
    server.start();
    fail("expecting exception");
  }
  catch ( IOException e) {
    assertTrue(e.getMessage().contains("Problem in starting http server. Server handlers failed"));
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Reads back an int[] that ObjectWritable wrote in the old, non-compact
 * format and checks that the class label, the length and every element
 * survive the round trip. Uses the shared {@code out}/{@code in} buffers and
 * the {@code i} array fixture declared on the enclosing class.
 */
@Test public void testOldFormat() throws IOException {
  ObjectWritable.writeObject(out,i,i.getClass(),null);
  in.reset(out.getData(),out.getLength());
  // The legacy format labels the payload with the array's class name.
  @SuppressWarnings("deprecation") String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className);
  int length=in.readInt();
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length);
  int[] readValue=new int[length];
  try {
    // Loop variable renamed to idx so it no longer shadows the int[] field i.
    for (int idx=0; idx < length; idx++) {
      readValue[idx]=(int)((Integer)ObjectWritable.readObject(in,null));
    }
  }
  catch ( Exception e) {
    fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e));
  }
  assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies ObjectWritable's labeling with compact arrays enabled: a bare
 * int[] goes out as ArrayPrimitiveWritable.Internal, while an explicit
 * ArrayPrimitiveWritable keeps its own class label — and both deserialize
 * back to the original values.
 */
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException {
  ObjectWritable.writeObject(out,i,i.getClass(),null,true);
  ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i);
  ObjectWritable.writeObject(out,apw,apw.getClass(),null,true);
  in.reset(out.getData(),out.getLength());
  String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className);
  ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal();
  apwi.readFields(in);
  // Bug fix: check the component type of the object just deserialized (apwi),
  // not the writer-side apw, whose component type is trivially int and so
  // could never detect corruption on the read path.
  assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apwi.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get())));
  String declaredClassName=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName);
  className=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className);
  ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable();
  apw2.readFields(in);
  assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get())));
}
IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips every array in {@code bigSet} twice — once via ObjectWritable
 * and once via ArrayPrimitiveWritable — then verifies component types and
 * contents against {@code expectedResultSet}.
 */
@Test public void testMany() throws IOException {
  // Serialize each fixture array in both encodings, back to back.
  for ( Object fixture : bigSet) {
    ObjectWritable.writeObject(out,fixture,fixture.getClass(),null,true);
    (new ArrayPrimitiveWritable(fixture)).write(out);
  }
  in.reset(out.getData(),out.getLength());
  // Deserialize pair-wise: even slots via ObjectWritable, odd slots via APW.
  for (int slot=0; slot < resultSet.length; slot+=2) {
    resultSet[slot]=ObjectWritable.readObject(in,null);
    ArrayPrimitiveWritable reread=new ArrayPrimitiveWritable();
    reread.readFields(in);
    resultSet[slot + 1]=reread.get();
  }
  assertEquals(expectedResultSet.length,resultSet.length);
  for (int slot=0; slot < resultSet.length; slot++) {
    assertEquals("ComponentType of array " + slot,expectedResultSet[slot].getClass().getComponentType(),resultSet[slot].getClass().getComponentType());
  }
  assertTrue("In and Out arrays didn't match values",Arrays.deepEquals(expectedResultSet,resultSet));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test {@link BooleanWritable} methods hashCode(), equals(), compareTo()
 */
@Test public void testCommonMethods(){
// equals(): value-based — equal for matching booleans, false across them.
assertTrue("testCommonMethods1 error !!!",newInstance(true).equals(newInstance(true)));
assertTrue("testCommonMethods2 error !!!",newInstance(false).equals(newInstance(false)));
assertFalse("testCommonMethods3 error !!!",newInstance(false).equals(newInstance(true)));
// hashCode(): equal values must share a hash; true/false should differ here.
assertTrue("testCommonMethods4 error !!!",checkHashCode(newInstance(true),newInstance(true)));
assertFalse("testCommonMethods5 error !!! ",checkHashCode(newInstance(true),newInstance(false)));
// compareTo(): true > false, false < true, equal values compare as 0.
assertTrue("testCommonMethods6 error !!!",newInstance(true).compareTo(newInstance(false)) > 0);
assertTrue("testCommonMethods7 error !!!",newInstance(false).compareTo(newInstance(true)) < 0);
assertTrue("testCommonMethods8 error !!!",newInstance(false).compareTo(newInstance(false)) == 0);
// toString(): renders the wrapped boolean.
assertEquals("testCommonMethods9 error !!!","true",newInstance(true).toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * This test was written as result of adding the new zero
 * copy constructor and set method to BytesWritable. These
 * methods allow users to specify the backing buffer of the
 * BytesWritable instance and a length.
 */
@Test public void testZeroCopy(){
byte[] bytes="brock".getBytes();
BytesWritable zeroBuf=new BytesWritable(bytes,bytes.length);
BytesWritable copyBuf=new BytesWritable(bytes);
// The zero-copy ctor must alias the caller's array, not copy it.
assertTrue("copy took place, backing array != array passed to constructor",bytes == zeroBuf.getBytes());
assertTrue("length of BW should backing byte array",zeroBuf.getLength() == bytes.length);
// Aliased and copied instances must be equal in every observable way.
assertEquals("objects with same backing array should be equal",zeroBuf,copyBuf);
assertEquals("string repr of objects with same backing array should be equal",zeroBuf.toString(),copyBuf.toString());
assertTrue("compare order objects with same backing array should be equal",zeroBuf.compareTo(copyBuf) == 0);
assertTrue("hash of objects with same backing array should be equal",zeroBuf.hashCode() == copyBuf.hashCode());
// Re-pointing via set(array, off, len) and back must restore equality.
byte[] buffer=new byte[bytes.length * 5];
zeroBuf.set(buffer,0,buffer.length);
zeroBuf.set(bytes,0,bytes.length);
assertEquals("buffer created with (array, len) has bad contents",zeroBuf,copyBuf);
assertTrue("buffer created with (array, len) has bad length",zeroBuf.getLength() == copyBuf.getLength());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises BytesWritable.setSize()/setCapacity(): growing preserves the
 * existing prefix and reallocates the backing array, while shrinking the
 * capacity truncates the length.
 */
@Test public void testSizeChange() throws Exception {
byte[] hadoop="hadoop".getBytes();
BytesWritable buf=new BytesWritable(hadoop);
int size=buf.getLength();
int orig_capacity=buf.getCapacity();
buf.setSize(size * 2);
int new_capacity=buf.getCapacity();
// Duplicate the original bytes into the newly exposed second half.
System.arraycopy(buf.getBytes(),0,buf.getBytes(),size,size);
assertTrue(new_capacity >= size * 2);
assertEquals(size * 2,buf.getLength());
// Growing beyond the original capacity must have reallocated.
assertTrue(new_capacity != orig_capacity);
buf.setSize(size * 4);
assertTrue(new_capacity != buf.getCapacity());
// The first 2*size bytes still repeat the original pattern.
for (int i=0; i < size * 2; ++i) {
assertEquals(hadoop[i % size],buf.getBytes()[i]);
}
assertEquals(size * 4,buf.copyBytes().length);
// Shrinking capacity below the size truncates the length as well.
buf.setCapacity(1);
assertEquals(1,buf.getLength());
assertEquals(hadoop[0],buf.getBytes()[0]);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test {@link ByteWritable}
 * methods compareTo(), toString(), equals()
 */
@Test public void testObjectCommonMethods(){
byte b=0x9;
ByteWritable bw=new ByteWritable();
bw.set(b);
// get() returns what set() stored.
assertTrue("testSetByteWritable error",bw.get() == b);
// compareTo(): sign follows the numeric ordering of the wrapped byte.
assertTrue("testSetByteWritable error < 0",bw.compareTo(new ByteWritable((byte)0xA)) < 0);
assertTrue("testSetByteWritable error > 0",bw.compareTo(new ByteWritable((byte)0x8)) > 0);
assertTrue("testSetByteWritable error == 0",bw.compareTo(new ByteWritable((byte)0x9)) == 0);
// equals(): value-based, and false for a different Writable type.
assertTrue("testSetByteWritable equals error !!!",bw.equals(new ByteWritable((byte)0x9)));
assertTrue("testSetByteWritable equals error !!!",!bw.equals(new ByteWritable((byte)0xA)));
assertTrue("testSetByteWritable equals error !!!",!bw.equals(new IntWritable(1)));
// toString(): renders the byte as a decimal string.
assertEquals("testSetByteWritable error ","9",bw.toString());
}
IterativeVerifier BooleanVerifier PublicFieldVerifier
/**
 * BytesWritable.compareTo must be antisymmetric, zero exactly for identical
 * content, and follow byte-wise lexicographic order.
 */
@Test public void testCompare() throws Exception {
  byte[][] raw=new byte[][]{"abc".getBytes(),"ad".getBytes(),"abcd".getBytes(),"".getBytes(),"b".getBytes()};
  BytesWritable[] writables=new BytesWritable[raw.length];
  for (int n=0; n < raw.length; ++n) {
    writables[n]=new BytesWritable(raw[n]);
  }
  // Pairwise contract: antisymmetry, and zero only on the diagonal.
  for (int a=0; a < raw.length; ++a) {
    for (int b=0; b < raw.length; ++b) {
      assertTrue(writables[a].compareTo(writables[b]) == -writables[b].compareTo(writables[a]));
      assertTrue((a == b) == (writables[a].compareTo(writables[b]) == 0));
    }
  }
  // Spot-check the expected lexicographic ordering.
  assertTrue(writables[0].compareTo(writables[1]) < 0);
  assertTrue(writables[1].compareTo(writables[2]) > 0);
  assertTrue(writables[2].compareTo(writables[3]) > 0);
  assertTrue(writables[3].compareTo(writables[4]) < 0);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * test throwing {@code IOException} in {@code MapFile.Writer} constructor
 */
@Test public void testWriteWithFailDirCreation(){
  String ERROR_MESSAGE="Mkdirs failed to create directory";
  Path dirName=new Path(TEST_DIR,"fail.mapfile");
  MapFile.Writer writer=null;
  try {
    FileSystem localFs=FileSystem.getLocal(conf);
    FileSystem spyFs=spy(localFs);
    Path spyPath=spy(dirName);
    // Force mkdirs() to report failure so the Writer ctor must throw.
    when(spyPath.getFileSystem(conf)).thenReturn(spyFs);
    when(spyFs.mkdirs(dirName)).thenReturn(false);
    writer=new MapFile.Writer(conf,spyPath,MapFile.Writer.keyClass(IntWritable.class),MapFile.Writer.valueClass(Text.class));
    fail("testWriteWithFailDirCreation error !!!");
  }
  catch ( IOException ex) {
    assertTrue("testWriteWithFailDirCreation ex error !!!",ex.getMessage().startsWith(ERROR_MESSAGE));
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * test {@code MapFile.Reader.next(key, value)} for iteration.
 */
@Test public void testReaderKeyIteration(){
  final String TEST_METHOD_KEY="testReaderKeyIteration.mapfile";
  int SIZE=10;
  int ITERATIONS=5;
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=createWriter(TEST_METHOD_KEY,IntWritable.class,Text.class);
    int start=0;
    for (int n=0; n < SIZE; n++) {
      writer.append(new IntWritable(n),new Text("Value:" + n));
    }
    writer.close();
    reader=createReader(TEST_METHOD_KEY,IntWritable.class);
    Writable startValue=new Text("Value:" + start);
    // Run several full scans, resetting the reader between passes.
    for (int pass=0; pass < ITERATIONS; pass++) {
      IntWritable key=new IntWritable(start);
      Writable value=startValue;
      while (reader.next(key,value)) {
        assertNotNull(key);
        assertNotNull(value);
      }
      reader.reset();
    }
    // seek() succeeds for a key inside the file, fails beyond its range.
    assertTrue("reader seek error !!!",reader.seek(new IntWritable(SIZE / 2)));
    assertFalse("reader seek error !!!",reader.seek(new IntWritable(SIZE * 2)));
  }
  catch ( IOException ex) {
    fail("reader seek error !!!");
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * test {@code MapFile.Writer.testFix} method
 */
@Test public void testFix(){
  final String INDEX_LESS_MAP_FILE="testFix.mapfile";
  int PAIR_SIZE=20;
  MapFile.Writer writer=null;
  try {
    FileSystem fs=FileSystem.getLocal(conf);
    Path dir=new Path(TEST_DIR,INDEX_LESS_MAP_FILE);
    writer=createWriter(INDEX_LESS_MAP_FILE,IntWritable.class,Text.class);
    // Write PAIR_SIZE identical entries, then drop the index so that
    // MapFile.fix() has something to rebuild.
    for (int n=0; n < PAIR_SIZE; n++) {
      writer.append(new IntWritable(0),new Text("value"));
    }
    writer.close();
    File indexFile=new File(".","." + INDEX_LESS_MAP_FILE + "/index");
    boolean isDeleted=false;
    if (indexFile.exists()) {
      isDeleted=indexFile.delete();
    }
    // fix() is only meaningful when the index was actually removed.
    if (isDeleted) {
      assertTrue("testFix error !!!",MapFile.fix(fs,dir,IntWritable.class,Text.class,true,conf) == PAIR_SIZE);
    }
  }
  catch ( Exception ex) {
    fail("testFix error !!!");
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * MapFile.rename must surface a filesystem rename that returns false as an
 * IOException with a "Could not rename" message.
 */
@Test public void testRenameWithFalse(){
  final String ERROR_MESSAGE="Could not rename";
  final String NEW_FILE_NAME="test-new.mapfile";
  final String OLD_FILE_NAME="test-old.mapfile";
  MapFile.Writer writer=null;
  try {
    FileSystem localFs=FileSystem.getLocal(conf);
    FileSystem spyFs=spy(localFs);
    writer=createWriter(OLD_FILE_NAME,IntWritable.class,IntWritable.class);
    writer.close();
    Path src=new Path(TEST_DIR,OLD_FILE_NAME);
    Path dst=new Path(TEST_DIR,NEW_FILE_NAME);
    // Make the underlying rename report failure; MapFile.rename must throw.
    when(spyFs.rename(src,dst)).thenReturn(false);
    MapFile.rename(spyFs,src.toString(),dst.toString());
    fail("testRenameWithException no exception error !!!");
  }
  catch ( IOException ex) {
    assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!",ex.getMessage().startsWith(ERROR_MESSAGE));
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * putAll must copy both the entries and the class-id bookkeeping maps.
 */
@Test(timeout=1000) public void testPutAll(){
  SortedMapWritable source=new SortedMapWritable();
  SortedMapWritable target=new SortedMapWritable();
  source.put(new Text("key"),new Text("value"));
  target.putAll(source);
  assertEquals("map1 entries don't match map2 entries",source,target);
  assertTrue("map2 doesn't have class information from map1",target.classToIdMap.containsKey(Text.class) && target.idToClassMap.containsValue(Text.class));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises SortedMapWritable: key ordering (first/last key), the copy
 * constructor, and maps nested as values inside other maps.
 */
@Test @SuppressWarnings("unchecked") public void testSortedMapWritable(){
  Text[] keys={new Text("key1"),new Text("key2"),new Text("key3")};
  BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes()),new BytesWritable("value3".getBytes())};
  SortedMapWritable source=new SortedMapWritable();
  for (int k=0; k < keys.length; k++) {
    source.put(keys[k],values[k]);
  }
  // Sorted semantics: first/last key follow the key ordering.
  assertEquals(0,source.firstKey().compareTo(keys[0]));
  assertEquals(0,source.lastKey().compareTo(keys[2]));
  // The copy constructor must reproduce every entry.
  SortedMapWritable copy=new SortedMapWritable(source);
  assertEquals(source.size(),copy.size());
  for ( Map.Entry entry : source.entrySet()) {
    assertTrue(copy.containsKey(entry.getKey()));
    assertEquals(0,((WritableComparable)copy.get(entry.getKey())).compareTo(entry.getValue()));
  }
  // Maps may themselves be values: copy a map of maps and compare deeply.
  Text[] nestedKeys={new Text("map1"),new Text("map2")};
  SortedMapWritable nested=new SortedMapWritable();
  nested.put(nestedKeys[0],source);
  nested.put(nestedKeys[1],copy);
  SortedMapWritable nestedCopy=new SortedMapWritable(nested);
  for (int k=0; k < nestedKeys.length; k++) {
    assertTrue(nestedCopy.containsKey(nestedKeys[k]));
    SortedMapWritable expected=(SortedMapWritable)nested.get(nestedKeys[k]);
    SortedMapWritable actual=(SortedMapWritable)nestedCopy.get(nestedKeys[k]);
    assertEquals(expected.size(),actual.size());
    for ( Writable key : expected.keySet()) {
      assertTrue(actual.containsKey(key));
      WritableComparable expectedValue=(WritableComparable)expected.get(key);
      WritableComparable actualValue=(WritableComparable)actual.get(key);
      assertEquals(0,expectedValue.compareTo(actualValue));
    }
  }
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests if equal and hashCode method still hold the contract.
 */
@Test public void testEqualsAndHashCode(){
String failureReason;
SortedMapWritable mapA=new SortedMapWritable();
SortedMapWritable mapB=new SortedMapWritable();
failureReason="SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason,mapA);
assertNotNull(failureReason,mapB);
// Empty maps: not equal to null, equal to each other.
assertFalse("equals method returns true when passed null",mapA.equals(null));
assertTrue("Two empty SortedMapWritables are no longer equal",mapA.equals(mapB));
Text[] keys={new Text("key1"),new Text("key2")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes())};
// Disjoint single entries: hashes and equality must differ, symmetrically.
mapA.put(keys[0],values[0]);
mapB.put(keys[1],values[1]);
failureReason="Two SortedMapWritables with different data are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
// Same entry set inserted in different order: equal, with equal hashes.
mapA.put(keys[1],values[1]);
mapB.put(keys[0],values[0]);
failureReason="Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason,mapA.hashCode(),mapB.hashCode());
assertTrue(failureReason,mapA.equals(mapB));
assertTrue(failureReason,mapB.equals(mapA));
// Swap the values on mapA only: equality and hashes must diverge again.
mapA.put(keys[0],values[1]);
mapA.put(keys[1],values[0]);
failureReason="Two SortedMapWritables with different content are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipCodecRead() throws IOException {
  // Force the pure-Java zlib implementation so the built-in inflater is used.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator", zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write a small gzip file via java.util.zip, then read it back through the codec.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  final String msg = "This is the message in the file!";
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  bw.write(msg);
  bw.close();
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = codec.createInputStream(fs.open(f), decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  // Pool one compressor from GzipCodec and one from DefaultCodec, then check
  // the pool does not hand DefaultCodec's compressor back for a GzipCodec.
  GzipCodec gzipCodec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec defaultCodec = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor fromGzip = CodecPool.getCompressor(gzipCodec);
  Compressor fromDefault = CodecPool.getCompressor(defaultCodec);
  CodecPool.returnCompressor(fromGzip);
  CodecPool.returnCompressor(fromDefault);
  assertTrue("Got mismatched ZlibCompressor", fromDefault != CodecPool.getCompressor(gzipCodec));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipLongOverflow() throws IOException {
  LOG.info("testGzipLongOverflow");
  // Force the pure-Java zlib implementation (built-in inflater).
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator", zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write 4097 x 1MB of NUL characters so the decompressed length overflows
  // a 32-bit counter, then verify the stream decompresses correctly.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipLongOverflow.bin.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF = 1024 * 4 + 1;
  // A freshly allocated char[] is already zero-filled ('\0'); the original
  // manual fill loop was redundant and has been removed.
  final char[] buf = new char[1024 * 1024];
  for (int i = 0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  for (int j = 0; j < NBUF; j++) {
    int n = br.read(buf);
    // BUG FIX: assertEquals takes (message, expected, actual); the original
    // passed (actual, expected), producing misleading failure output.
    assertEquals("got wrong read length!", buf.length, n);
    for (int i = 0; i < buf.length; i++) {
      assertEquals("got wrong byte!", '\0', buf[i]);
    }
  }
  br.close();
}
InternalCallVerifier BooleanVerifier
/**
 * In {@link CompressorStream#close()}, if {@link CompressorStream#finish()} throws an IOEXception, outputStream
 * object was not getting closed.
 */
@Test public void testClose(){
  TestCompressorStream testCompressorStream = new TestCompressorStream();
  try {
    testCompressorStream.close();
  }
  catch (IOException e) {
    // finish() is expected to throw; close() must still mark the stream closed.
    System.out.println("Expected IOException");
  }
  // FIX: corrected "shoud" typo in the failure message and dropped the
  // redundant cast (TestCompressorStream already is-a CompressorStream).
  Assert.assertTrue("closed should be true", testCompressorStream.closed);
  file.delete();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCompressDecompress(){
  // Round-trip 54KB of generated data through the LZ4 compressor/decompressor.
  int BYTE_SIZE = 1024 * 54;
  byte[] input = generate(BYTE_SIZE);
  Lz4Compressor compressor = new Lz4Compressor();
  try {
    compressor.setInput(input, 0, input.length);
    assertTrue("Lz4CompressDecompress getBytesRead error !!!", compressor.getBytesRead() > 0);
    assertTrue("Lz4CompressDecompress getBytesWritten before compress error !!!", compressor.getBytesWritten() == 0);
    byte[] compressed = new byte[BYTE_SIZE];
    int compressedSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!", compressor.getBytesWritten() > 0);
    Lz4Decompressor decompressor = new Lz4Decompressor();
    decompressor.setInput(compressed, 0, compressedSize);
    byte[] roundTripped = new byte[BYTE_SIZE];
    decompressor.decompress(roundTripped, 0, roundTripped.length);
    assertTrue("testLz4CompressDecompress finished error !!!", decompressor.finished());
    // The decompressed bytes must match the original input exactly.
    assertArrayEquals(input, roundTripped);
    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!", decompressor.getRemaining() == 0);
  }
  catch (Exception e) {
    fail("testLz4CompressDecompress ex error!!!");
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier HybridVerifier
@Test public void testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize(){
  // One byte more than the default 64KB LZ4 buffer: compression must still work.
  int BYTES_SIZE = 1024 * 64 + 1;
  try {
    Lz4Compressor compressor = new Lz4Compressor();
    byte[] input = generate(BYTES_SIZE);
    assertTrue("needsInput error !!!", compressor.needsInput());
    compressor.setInput(input, 0, input.length);
    byte[] output = new byte[BYTES_SIZE];
    int compressedSize = compressor.compress(output, 0, input.length);
    assertTrue("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize error !!!", compressedSize != 0);
  }
  catch (Exception ex) {
    fail("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize ex error !!!");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSnappyCompressDecompress(){
  // Round-trip 54KB of generated bytes through Snappy and verify fidelity.
  int BYTE_SIZE = 1024 * 54;
  byte[] input = BytesGenerator.get(BYTE_SIZE);
  SnappyCompressor compressor = new SnappyCompressor();
  try {
    compressor.setInput(input, 0, input.length);
    assertTrue("SnappyCompressDecompress getBytesRead error !!!", compressor.getBytesRead() > 0);
    assertTrue("SnappyCompressDecompress getBytesWritten before compress error !!!", compressor.getBytesWritten() == 0);
    byte[] compressed = new byte[BYTE_SIZE];
    int compressedSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue("SnappyCompressDecompress getBytesWritten after compress error !!!", compressor.getBytesWritten() > 0);
    SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
    decompressor.setInput(compressed, 0, compressedSize);
    byte[] roundTripped = new byte[BYTE_SIZE];
    decompressor.decompress(roundTripped, 0, roundTripped.length);
    assertTrue("testSnappyCompressDecompress finished error !!!", decompressor.finished());
    // The decompressed bytes must match the original input exactly.
    Assert.assertArrayEquals(input, roundTripped);
    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!", decompressor.getRemaining() == 0);
  }
  catch (Exception e) {
    fail("testSnappyCompressDecompress ex error!!!");
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testSnappyBlockCompression(){
  int BYTE_SIZE = 1024 * 50;
  int BLOCK_SIZE = 512;
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  byte[] block = new byte[BLOCK_SIZE];
  byte[] bytes = BytesGenerator.get(BYTE_SIZE);
  try {
    SnappyCompressor compressor = new SnappyCompressor();
    int off = 0;
    int len = BYTE_SIZE;
    // Leave room for per-block overhead (18 bytes) within each BLOCK_SIZE unit.
    int maxSize = BLOCK_SIZE - 18;
    if (BYTE_SIZE > maxSize) {
      // Feed the input one bounded chunk at a time, draining the compressor
      // completely between chunks.
      while (len > 0) {
        int chunk = Math.min(len, maxSize);
        compressor.setInput(bytes, off, chunk);
        compressor.finish();
        while (!compressor.finished()) {
          compressor.compress(block, 0, block.length);
          out.write(block);
        }
        compressor.reset();
        off += chunk;
        len -= chunk;
      }
    }
    assertTrue("testSnappyBlockCompression error !!!", out.toByteArray().length > 0);
  }
  catch (Exception ex) {
    fail("testSnappyBlockCompression ex error !!!");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Exercises BuiltInGzipDecompressor error handling: bad setInput arguments,
// state after reset/end, and malformed gzip headers fed through a
// DecompressorStream. Each empty catch block swallows the exception that the
// scenario is expected to raise; the following catch(Exception) turns any
// unexpected exception type into a test failure.
@Test public void testBuiltInGzipDecompressorExceptions(){
BuiltInGzipDecompressor decompresser=new BuiltInGzipDecompressor();
// Null input buffer must raise NullPointerException.
try {
decompresser.setInput(null,0,1);
}
catch ( NullPointerException ex) {
// expected
}
catch ( Exception ex) {
fail("testBuiltInGzipDecompressorExceptions npe error " + ex);
}
// Negative length must raise ArrayIndexOutOfBoundsException.
try {
decompresser.setInput(new byte[]{0},0,-1);
}
catch ( ArrayIndexOutOfBoundsException ex) {
// expected
}
catch ( Exception ex) {
fail("testBuiltInGzipDecompressorExceptions aioob error" + ex);
}
// Neither failed setInput call should have advanced the counters.
assertTrue("decompresser.getBytesRead error",decompresser.getBytesRead() == 0);
assertTrue("decompresser.getRemaining error",decompresser.getRemaining() == 0);
decompresser.reset();
decompresser.end();
InputStream decompStream=null;
// Stream whose first two bytes (0,0) are not the gzip magic (31,-117):
// reading it should raise IOException, which is silently accepted.
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{0,0,1,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
// expected: bad magic bytes
}
catch ( Exception ex) {
fail("invalid 0 and 1 byte in gzip stream" + ex);
}
// Correct magic but invalid compression method byte (7 instead of 8/deflate).
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,7,1,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
// expected: unsupported compression method
}
catch ( Exception ex) {
fail("invalid 2 byte in gzip stream" + ex);
}
// Correct magic and method, but an invalid flags byte (-32 sets reserved bits).
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,8,-32,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
// expected: reserved flag bits set
}
catch ( Exception ex) {
fail("invalid 3 byte in gzip stream" + ex);
}
// Flags byte 4 sets FEXTRA; the truncated extra field should fail cleanly.
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,8,4,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
// expected: malformed extra field
}
catch ( Exception ex) {
fail("invalid 3 byte make hasExtraField" + ex);
}
}
BooleanVerifier
@Test public void testZlibFactory(){
  Configuration cfg = new Configuration();
  // Defaults on a fresh Configuration.
  assertTrue("testZlibFactory compression level error !!!", CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory.getCompressionLevel(cfg));
  assertTrue("testZlibFactory compression strategy error !!!", CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory.getCompressionStrategy(cfg));
  // Setters must round-trip through the configuration.
  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // FIX: the original message said "strategy" on this compression-LEVEL check
  // (copy/paste error), producing a misleading failure diagnostic.
  assertTrue("testZlibFactory compression level error !!!", CompressionLevel.BEST_COMPRESSION == ZlibFactory.getCompressionLevel(cfg));
  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!", CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testZlibCompressDecompress(){
  // Round-trip 64KB of generated data through the zlib compressor pair.
  int rawDataSize = 1024 * 64;
  byte[] rawData = generate(rawDataSize);
  try {
    ZlibCompressor compressor = new ZlibCompressor();
    ZlibDecompressor decompressor = new ZlibDecompressor();
    assertFalse("testZlibCompressDecompress finished error", compressor.finished());
    compressor.setInput(rawData, 0, rawData.length);
    // zlib consumes input lazily: nothing is read until compress() is driven.
    assertTrue("testZlibCompressDecompress getBytesRead before error", compressor.getBytesRead() == 0);
    compressor.finish();
    byte[] compressedResult = new byte[rawDataSize];
    int cSize = compressor.compress(compressedResult, 0, rawDataSize);
    // FIX: corrected "ather" -> "after" and "no less then" -> "not less than"
    // typos in the failure messages.
    assertTrue("testZlibCompressDecompress getBytesRead after error", compressor.getBytesRead() == rawDataSize);
    assertTrue("testZlibCompressDecompress compressed size not less than original size", cSize < rawDataSize);
    decompressor.setInput(compressedResult, 0, cSize);
    byte[] decompressedBytes = new byte[rawDataSize];
    decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
    assertArrayEquals("testZlibCompressDecompress arrays not equals ", rawData, decompressedBytes);
    compressor.reset();
    decompressor.reset();
  }
  catch (IOException ex) {
    fail("testZlibCompressDecompress ex !!!" + ex);
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testZlibCompressorDecompressorWithConfiguration(){
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    // Mirrors the original behavior: this assertion always fails when native
    // zlib is unavailable, flagging the unmet native-library requirement.
    assertTrue("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf));
    return;
  }
  byte[] rawData;
  int tryNumber = 5;
  int BYTE_SIZE = 10 * 1024;
  Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  rawData = generate(BYTE_SIZE);
  try {
    // Round-trip the same data several times through the same instances,
    // then make sure reinit() succeeds afterwards.
    for (int attempt = 0; attempt < tryNumber; attempt++) {
      compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor, (ZlibDecompressor) zlibDecompressor);
    }
    zlibCompressor.reinit(conf);
  }
  catch (Exception ex) {
    fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
  }
}
BranchVerifier InternalCallVerifier BooleanVerifier
@Test public void testZlibCompressorDecompressorSetDictionary(){
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    // Mirrors the original behavior: this assertion always fails when native
    // zlib is unavailable, flagging the unmet native-library requirement.
    assertTrue("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf));
    return;
  }
  Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  // setDictionary must reject null input and out-of-bounds ranges on both
  // the compressor and decompressor sides.
  checkSetDictionaryNullPointerException(zlibCompressor);
  checkSetDictionaryNullPointerException(zlibDecompressor);
  checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
  checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
}
BranchVerifier BooleanVerifier
@Test public void testFailureCompressionNotWorking() throws IOException {
  if (skip) return;
  // Write ten blocks' worth of records; with a real codec configured, the
  // file position must stay below the raw (uncompressed) data size.
  long rawDataSize = writeRecords(10 * records1stBlock, false);
  boolean noCompression = compression.equalsIgnoreCase(Compression.Algorithm.NONE.getName());
  if (!noCompression) {
    Assert.assertTrue(out.getPos() < rawDataSize);
  }
  closeOutput();
}
InternalCallVerifier BooleanVerifier
@Test public void testNoDataEntry() throws IOException {
  if (skip) return;
  closeOutput();
  // An empty TFile must still report itself sorted and yield a scanner that
  // is immediately at its end.
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertTrue(reader.isSorted());
  Scanner scanner = reader.createScanner();
  Assert.assertTrue(scanner.atEnd());
  scanner.close();
  reader.close();
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test basic chmod operation
 */
@Test(timeout=30000) public void testChmod() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  // chmod on a missing path must surface ENOENT (the mode value is irrelevant
  // here since the call fails before the mode is applied).
  try {
    NativeIO.POSIX.chmod("/this/file/doesnt/exist", 777);
    fail("Chmod of non-existent file didn't fail");
  }
  catch (NativeIOException nioe) {
    assertEquals(Errno.ENOENT, nioe.getErrno());
  }
  File toChmod = new File(TEST_DIR, "testChmod");
  assertTrue("Create test subject", toChmod.exists() || toChmod.mkdir());
  // Cycle through representative octal modes and verify each one sticks.
  for (int mode : new int[]{0777, 0000, 0644}) {
    NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), mode);
    assertPermissions(toChmod, mode);
  }
}
UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testOpenWithCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Test creating a file with O_CREAT");
  FileDescriptor fd = NativeIO.POSIX.open(new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(), NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
  // FIX: the original assertNotNull(true) was a no-op that could never fail;
  // assert on the returned descriptor itself.
  assertNotNull(fd);
  assertTrue(fd.valid());
  FileOutputStream fos = new FileOutputStream(fd);
  fos.write("foo".getBytes());
  fos.close();
  // Closing the stream invalidates the underlying descriptor.
  assertFalse(fd.valid());
  LOG.info("Test exclusive create");
  // Re-opening the same path with O_EXCL must fail with EEXIST.
  try {
    fd = NativeIO.POSIX.open(new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(), NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL, 0700);
    fail("Was able to create existing file with O_EXCL");
  }
  catch (NativeIOException nioe) {
    LOG.info("Got expected exception for failed exclusive create", nioe);
    assertEquals(Errno.EEXIST, nioe.getErrno());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 */
@Test(timeout=30000) public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  final FileOutputStream fos = new FileOutputStream(new File(TEST_DIR, "testfstat"));
  // FIX: restored parameterized types. The raw AtomicReference/List (likely
  // stripped generics) made get() return Object, which does not match
  // RuntimeException(Throwable) below.
  final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
  List<Thread> statters = new ArrayList<Thread>();
  for (int i = 0; i < 10; i++) {
    Thread statter = new Thread() {
      @Override public void run() {
        // Hammer getFstat for ~5 seconds, recording any failure for the
        // main thread to rethrow.
        long et = Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"), stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file", NativeIO.POSIX.Stat.S_IFREG, stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          }
          catch (Throwable t) {
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for (Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testSetFilePointer() throws Exception {
  if (!Path.WINDOWS) {
    return;
  }
  LOG.info("Set a file pointer on Windows");
  try {
    File testfile = new File(TEST_DIR, "testSetFilePointer");
    assertTrue("Create test subject", testfile.exists() || testfile.createNewFile());
    // Write 100 'a' characters followed by 100 'b' characters.
    FileWriter writer = new FileWriter(testfile);
    try {
      for (int i = 0; i < 200; i++) {
        writer.write(i < 100 ? 'a' : 'b');
      }
      writer.flush();
    }
    catch (Exception writerException) {
      fail("Got unexpected exception: " + writerException.getMessage());
    }
    finally {
      writer.close();
    }
    // Seek to offset 120; the next character read must come from the 'b' region.
    FileDescriptor fd = NativeIO.Windows.createFile(testfile.getCanonicalPath(), NativeIO.Windows.GENERIC_READ, NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE | NativeIO.Windows.FILE_SHARE_DELETE, NativeIO.Windows.OPEN_EXISTING);
    NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
    FileReader reader = new FileReader(fd);
    try {
      int c = reader.read();
      assertTrue("Unexpected character: " + c, c == 'b');
    }
    catch (Exception readerException) {
      fail("Got unexpected exception: " + readerException.getMessage());
    }
    finally {
      reader.close();
    }
  }
  catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
BooleanVerifier
/**
 * Validate access checks on Windows
 */
@Test(timeout=30000) public void testAccess() throws Exception {
  if (!Path.WINDOWS) {
    return;
  }
  File testFile = new File(TEST_DIR, "testfileaccess");
  assertTrue(testFile.createNewFile());
  assertAccessTogglesWithPermissions(testFile);
  // Repeat the checks on a file nested 15 directories deep to exercise
  // access() on long relative paths.
  String testFileRelativePath = "";
  for (int i = 0; i < 15; ++i) {
    testFileRelativePath += "testfileaccessfolder\\";
  }
  testFileRelativePath += "testfileaccess";
  testFile = new File(TEST_DIR, testFileRelativePath);
  assertTrue(testFile.getParentFile().mkdirs());
  assertTrue(testFile.createNewFile());
  assertAccessTogglesWithPermissions(testFile);
}
/**
 * Toggles read, write, and execute permission off and on for the given file
 * and verifies NativeIO.Windows.access reflects each change.
 * (Extracted from testAccess, which repeated this sequence verbatim twice.)
 */
private static void assertAccessTogglesWithPermissions(File testFile) throws Exception {
  FileUtil.setReadable(testFile, false);
  assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_READ));
  FileUtil.setReadable(testFile, true);
  assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_READ));
  FileUtil.setWritable(testFile, false);
  assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_WRITE));
  FileUtil.setWritable(testFile, true);
  assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_WRITE));
  FileUtil.setExecutable(testFile, false);
  assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
  FileUtil.setExecutable(testFile, true);
  assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(), NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testRenameTo() throws Exception {
  final File TEST_DIR = new File(new File(System.getProperty("test.build.data", "build/test/data")), "renameTest");
  assumeTrue(TEST_DIR.mkdirs());
  File nonExistentFile = new File(TEST_DIR, "nonexistent");
  File targetFile = new File(TEST_DIR, "target");
  // Renaming a missing source must raise a platform-specific error.
  try {
    NativeIO.renameTo(nonExistentFile, targetFile);
    Assert.fail();
  }
  catch (NativeIOException e) {
    if (Path.WINDOWS) {
      Assert.assertEquals(String.format("The system cannot find the file specified.%n"), e.getMessage());
    }
    else {
      Assert.assertEquals(Errno.ENOENT, e.getErrno());
    }
  }
  // Renaming a file onto itself and onto a fresh target must both succeed.
  File sourceFile = new File(TEST_DIR, "source");
  Assert.assertTrue(sourceFile.createNewFile());
  NativeIO.renameTo(sourceFile, sourceFile);
  NativeIO.renameTo(sourceFile, targetFile);
  // Renaming under a path whose parent is a regular file must fail.
  sourceFile = new File(TEST_DIR, "source");
  Assert.assertTrue(sourceFile.createNewFile());
  File badTarget = new File(targetFile, "subdir");
  try {
    NativeIO.renameTo(sourceFile, badTarget);
    Assert.fail();
  }
  catch (NativeIOException e) {
    if (Path.WINDOWS) {
      Assert.assertEquals(String.format("The parameter is incorrect.%n"), e.getMessage());
    }
    else {
      Assert.assertEquals(Errno.ENOTDIR, e.getErrno());
    }
  }
  FileUtils.deleteQuietly(TEST_DIR);
}
BooleanVerifier
@Test(timeout=30000) public void testGetUserName() throws IOException {
  if (Path.WINDOWS) {
    return;
  }
  // uid 0 (root) must resolve to a non-empty user name on POSIX systems.
  String rootName = NativeIO.POSIX.getUserName(0);
  assertFalse(rootName.isEmpty());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testCreateFile() throws Exception {
  if (!Path.WINDOWS) {
    return;
  }
  LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
  try {
    File testfile = new File(TEST_DIR, "testCreateFile");
    assertTrue("Create test subject", testfile.exists() || testfile.createNewFile());
    // SHARE_DELETE allows the file to be renamed while this handle is open.
    FileDescriptor fd = NativeIO.Windows.createFile(testfile.getCanonicalPath(), NativeIO.Windows.GENERIC_READ, NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE | NativeIO.Windows.FILE_SHARE_DELETE, NativeIO.Windows.OPEN_EXISTING);
    FileInputStream fin = new FileInputStream(fd);
    try {
      fin.read();
      File newfile = new File(TEST_DIR, "testRenamedFile");
      boolean renamed = testfile.renameTo(newfile);
      assertTrue("Rename failed.", renamed);
      // Reading must continue to work after the rename.
      fin.read();
    }
    catch (Exception e) {
      fail("Got unexpected exception: " + e.getMessage());
    }
    finally {
      fin.close();
    }
  }
  catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
IterativeVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that opens and closes a file 10000 times - this would crash with
 * "Too many open files" if we leaked fds using this access pattern.
 */
@Test(timeout=30000) public void testFDDoesntLeak() throws IOException {
  if (Path.WINDOWS) {
    return;
  }
  for (int i = 0; i < 10000; i++) {
    FileDescriptor fd = NativeIO.POSIX.open(new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(), NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
    // FIX: the original assertNotNull(true) was a no-op that could never
    // fail; assert on the returned descriptor itself.
    assertNotNull(fd);
    assertTrue(fd.valid());
    FileOutputStream fos = new FileOutputStream(fd);
    fos.write("foo".getBytes());
    fos.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testFstat() throws Exception {
  FileOutputStream fos = new FileOutputStream(new File(TEST_DIR, "testfstat"));
  NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));
  String owner = stat.getOwner();
  String expectedOwner = System.getProperty("user.name");
  if (Path.WINDOWS) {
    // On Windows, a file created by a member of the Administrators group is
    // owned by that group rather than by the individual user.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  assertEquals(expectedOwner, owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file", NativeIO.POSIX.Stat.S_IFREG, stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
BooleanVerifier
@Test(timeout=30000) public void testGetGroupName() throws IOException {
  if (Path.WINDOWS) {
    return;
  }
  // gid 0 must resolve to a non-empty group name on POSIX systems.
  String rootGroup = NativeIO.POSIX.getGroupName(0);
  assertFalse(rootGroup.isEmpty());
}
APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
@Test(timeout=10000) public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  // Plant two stale temporaries carrying the factory's prefix; constructing
  // the factory must sweep them away.
  String remainder1 = path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_", new String[]{path.getAbsolutePath()});
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Ensure that normal IO exceptions don't result in a failover.
 */
@Test public void testExpectedIOException(){
  UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, newFlipFlopProxyProvider(TypeOfExceptionToFailWith.REMOTE_EXCEPTION, TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION), RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000));
  try {
    unreliable.failsIfIdentifierDoesntMatch("no-such-identifier");
    fail("Should have thrown *some* exception");
  }
  catch (Exception e) {
    // A plain IOException must surface as-is rather than trigger failover.
    assertTrue("Expected IOE but got " + e.getClass(), e instanceof IOException);
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}
 */
@Test public void testRpcInvocation() throws Exception {
  final UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
  assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
  // A ProtocolTranslator wrapping an RPC proxy must also count as an RPC
  // invocation; count how many times the underlying proxy is fetched.
  ProtocolTranslator xlator = new ProtocolTranslator() {
    int count = 0;
    @Override public Object getUnderlyingProxyObject() {
      count++;
      return unreliable;
    }
    @Override public String toString() {
      return "" + count;
    }
  };
  assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
  // FIX: assertEquals takes (expected, actual); the original passed the
  // actual value first, producing misleading failure output.
  assertEquals("1", xlator.toString());
  assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testRetryInterruptible() throws Throwable {
  final UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, unreliableImpl, retryUpToMaximumTimeWithFixedSleep(10, 10, TimeUnit.SECONDS));
  final CountDownLatch latch = new CountDownLatch(1);
  // FIX: restored parameterized types. The raw AtomicReference/Future/Callable
  // (likely stripped generics) made futureThread.get() return Object, which
  // does not support .isAlive()/.interrupt(), and future.get() untyped.
  final AtomicReference<Thread> futureThread = new AtomicReference<Thread>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  Future<Throwable> future = exec.submit(new Callable<Throwable>() {
    @Override public Throwable call() throws Exception {
      futureThread.set(Thread.currentThread());
      latch.countDown();
      try {
        unreliable.alwaysFailsWithFatalException();
      }
      catch (UndeclaredThrowableException ute) {
        return ute.getCause();
      }
      return null;
    }
  });
  // Wait until the worker is inside the retried call, then interrupt it;
  // the retry sleep must surface as an InterruptedException.
  latch.await();
  Thread.sleep(1000);
  assertTrue(futureThread.get().isAlive());
  futureThread.get().interrupt();
  Throwable e = future.get(1, TimeUnit.SECONDS);
  assertNotNull(e);
  assertEquals(InterruptedException.class, e.getClass());
  assertEquals("sleep interrupted", e.getMessage());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=60000) public void testStandAloneClient() throws IOException {
  Client client = new Client(LongWritable.class, conf);
  // Port 10 is virtually guaranteed to be unbound; the call must fail.
  InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
  try {
    client.call(new LongWritable(RANDOM.nextLong()), address, null, null, 0, conf);
    fail("Expected an exception to have been thrown");
  }
  catch (IOException e) {
    // The wrapping exception must name the target address and include the
    // nested cause's message.
    String message = e.getMessage();
    String addressText = address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in "+ message, message.contains(addressText));
    Throwable cause = e.getCause();
    assertNotNull("No nested exception in " + e, cause);
    String causeText = cause.getMessage();
    assertTrue("Did not find " + causeText + " in "+ message, message.contains(causeText));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  final Client client = new Client(LongWritable.class, conf);
  // FIX: use the retryCount constant instead of repeating the literal 255,
  // so the value sent and the value asserted cannot drift apart.
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);
  final TestServer server = new TestServer(1, false);
  // The server-side handler must observe the retry count sent by the client.
  server.callListener = new Runnable() {
    @Override public void run() {
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  }
  finally {
    client.stop();
    server.stop();
  }
}
BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Check that file descriptors aren't leaked by starting
 * and stopping IPC servers.
 */
@Test(timeout=60000) public void testSocketLeak() throws IOException {
  Assume.assumeTrue(FD_DIR.exists());
  long startFds = countOpenFileDescriptors();
  // Churn 50 servers; open-descriptor usage should stay essentially flat.
  for (int i = 0; i < 50; i++) {
    Server server = new TestServer(1, true);
    server.start();
    server.stop();
  }
  long endFds = countOpenFileDescriptors();
  assertTrue("Leaked " + (endFds - startFds) + " file descriptors", endFds - startFds < 20);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testConnectionIdleTimeouts() throws Exception {
  ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.DEBUG);
  final int maxIdle = 1000;
  final int cleanupInterval = maxIdle * 3 / 4;
  final int killMax = 3;
  final int clients = 1 + killMax * 2;
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, maxIdle);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY, 0);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY, killMax);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY, cleanupInterval);
  final CyclicBarrier firstCallBarrier = new CyclicBarrier(2);
  final CyclicBarrier callBarrier = new CyclicBarrier(clients);
  final CountDownLatch allCallLatch = new CountDownLatch(clients);
  final AtomicBoolean error = new AtomicBoolean();
  final TestServer server = new TestServer(clients, false);
  Thread[] threads = new Thread[clients];
  try {
    // The first call parks on its own barrier so its connection stays busy;
    // every later call parks together on callBarrier.
    server.callListener = new Runnable() {
      AtomicBoolean first = new AtomicBoolean(true);
      @Override public void run() {
        try {
          allCallLatch.countDown();
          if (first.compareAndSet(true, false)) {
            firstCallBarrier.await();
          }
          else {
            callBarrier.await();
          }
        }
        catch (Throwable t) {
          LOG.error(t);
          error.set(true);
        }
      }
    };
    server.start();
    final CountDownLatch callReturned = new CountDownLatch(clients - 1);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    final Configuration clientConf = new Configuration();
    clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 10000);
    for (int i = 0; i < clients; i++) {
      threads[i] = new Thread(new Runnable() {
        @Override public void run() {
          Client client = new Client(LongWritable.class, clientConf);
          try {
            client.call(new LongWritable(Thread.currentThread().getId()), addr, null, null, 0, clientConf);
            callReturned.countDown();
            Thread.sleep(10000);
          }
          catch (IOException e) {
            LOG.error(e);
          }
          catch (InterruptedException e) {
          }
        }
      });
      threads[i].start();
    }
    allCallLatch.await();
    assertFalse(error.get());
    assertEquals(clients, server.getNumOpenConnections());
    // Release all but the first call and wait for their responses to return;
    // those client connections then sit idle on the server side.
    callBarrier.await();
    callReturned.await();
    assertEquals(clients, server.getNumOpenConnections());
    // Each idle scan evicts at most killMax connections per interval.
    Thread.sleep(maxIdle * 2 - cleanupInterval);
    for (int i = clients; i > 1; i -= killMax) {
      Thread.sleep(cleanupInterval);
      assertFalse(error.get());
      assertEquals(i, server.getNumOpenConnections());
    }
    Thread.sleep(cleanupInterval);
    assertFalse(error.get());
    assertEquals(1, server.getNumOpenConnections());
    // Let the remaining busy call finish; its connection should idle out too.
    firstCallBarrier.await();
    Thread.sleep(maxIdle * 2);
    assertFalse(error.get());
    assertEquals(0, server.getNumOpenConnections());
  }
  finally {
    for (Thread t : threads) {
      if (t != null) {
        t.interrupt();
        t.join();
      }
    }
    // BUG FIX: stop the server exactly once, after all client threads are
    // joined; the original called server.stop() inside the per-thread loop,
    // invoking it once per thread slot.
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verify that (1) the RPC server observes the call id/retry supplied by
 * the client, and (2) the client sees those same values echoed back in
 * the server's response header.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
  final CallInfo expected=new CallInfo();
  // Client that records the id/retry of every call it creates, then checks
  // each response header carries exactly those values back.
  final Client rpcClient=new Client(LongWritable.class,conf){
    @Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
      final Call newCall=super.createCall(rpcKind,rpcRequest);
      expected.id=newCall.id;
      expected.retry=newCall.retry;
      return newCall;
    }
    @Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
      super.checkResponse(header);
      Assert.assertEquals(expected.id,header.getCallId());
      Assert.assertEquals(expected.retry,header.getRetryCount());
    }
  };
  // Server-side handler must see the same id/retry via the static accessors.
  final TestServer rpcServer=new TestServer(1,false);
  rpcServer.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(expected.id,Server.getCallId());
      Assert.assertEquals(expected.retry,Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(rpcServer);
    rpcServer.start();
    final SerialCaller caller=new SerialCaller(rpcClient,addr,10);
    caller.run();
    assertFalse(caller.failed);
  }
  finally {
    rpcClient.stop();
    rpcServer.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A client that never retries must present the default retry count (0)
 * to the server.
 */
@Test(timeout=60000) public void testInitialCallRetryCount() throws IOException {
  final Client rpcClient=new Client(LongWritable.class,conf);
  final TestServer rpcServer=new TestServer(1,false);
  // Handler-side check: every call arrives with retry count zero.
  rpcServer.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(0,Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(rpcServer);
    rpcServer.start();
    SerialCaller caller=new SerialCaller(rpcClient,addr,10);
    caller.run();
    assertFalse(caller.failed);
  }
  finally {
    rpcClient.stop();
    rpcServer.stop();
  }
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that the client generates a unique, sequential call ID for each
 * RPC call, even when multiple threads share the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testUniqueSequentialCallIds() throws IOException, InterruptedException {
  int serverThreads=10, callerCount=100, perCallerCallCount=100;
  TestServer server=new TestServer(serverThreads,false);
  // BUGFIX: the list must be typed — with a raw List, callIds.get(i)
  // yields Object and the .intValue() calls below do not compile.
  final List<Integer> callIds=Collections.synchronizedList(new ArrayList<Integer>());
  // Record the server-observed call id of every incoming call.
  server.callListener=new Runnable(){
    @Override public void run(){
      callIds.add(Server.getCallId());
    }
  };
  Client client=new Client(LongWritable.class,conf);
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller[] callers=new SerialCaller[callerCount];
    for (int i=0; i < callerCount; ++i) {
      callers[i]=new SerialCaller(client,addr,perCallerCallCount);
      callers[i].start();
    }
    for (int i=0; i < callerCount; ++i) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
  }
  finally {
    client.stop();
    server.stop();
  }
  // Sorted ids must form one contiguous run: unique AND sequential.
  int expectedCallCount=callerCount * perCallerCallCount;
  assertEquals(expectedCallCount,callIds.size());
  Collections.sort(callIds);
  final int startID=callIds.get(0).intValue();
  for (int i=0; i < expectedCallCount; ++i) {
    assertEquals(startID + i,callIds.get(i).intValue());
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
* Test that, if the socket factory throws an IOE, it properly propagates
* to the client.
*/
@Test(timeout=60000) public void testSocketFactoryException() throws IOException {
SocketFactory mockFactory=mock(SocketFactory.class);
doThrow(new IOException("Injected fault")).when(mockFactory).createSocket();
Client client=new Client(LongWritable.class,conf,mockFactory);
InetSocketAddress address=new InetSocketAddress("127.0.0.1",10);
try {
client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
fail("Expected an exception to have been thrown");
}
catch ( IOException e) {
assertTrue(e.getMessage().contains("Injected fault"));
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that, if a RuntimeException is thrown after creating a socket
 * but before successfully connecting to the IPC server, the failure is
 * handled properly. Regression test for HADOOP-7428.
 */
@Test(timeout=60000) public void testRTEDuringConnectionSetup() throws IOException {
  SocketFactory spyFactory=spy(NetUtils.getDefaultSocketFactory(conf));
  // Typed Answer<Socket> instead of the raw Answer: avoids a raw-type
  // warning while keeping the identical spy behavior.
  Mockito.doAnswer(new Answer<Socket>(){
    @Override public Socket answer( InvocationOnMock invocation) throws Throwable {
      // Wrap the real socket and make setSoTimeout blow up mid-setup.
      Socket s=spy((Socket)invocation.callRealMethod());
      doThrow(new RuntimeException("Injected fault")).when(s).setSoTimeout(anyInt());
      return s;
    }
  }).when(spyFactory).createSocket();
  Server server=new TestServer(1,true);
  server.start();
  try {
    InetSocketAddress address=NetUtils.getConnectAddress(server);
    Client client=new Client(LongWritable.class,conf,spyFactory);
    try {
      client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
      fail("Expected an exception to have been thrown");
    }
    catch ( Exception e) {
      LOG.info("caught expected exception",e);
      assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
    }
    // After clearing the fault injection, the same client must recover
    // and complete a call successfully.
    Mockito.reset(spyFactory);
    client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
  }
  finally {
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/** A custom IdentityProvider configured by class name must be instantiated. */
@Test public void testPluggableIdentityProvider(){
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,"org.apache.hadoop.ipc.UserIdentityProvider");
  // BUGFIX: the list must be typed — with a raw List, providers.get(0)
  // yields Object and the IdentityProvider assignment below does not compile.
  List<IdentityProvider> providers=conf.getInstances(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,IdentityProvider.class);
  // assertEquals reports the actual size on failure, unlike assertTrue(==).
  assertEquals(1,providers.size());
  IdentityProvider ip=providers.get(0);
  assertNotNull(ip);
  // Expected value first, per JUnit convention.
  assertEquals(UserIdentityProvider.class,ip.getClass());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that we can reach a NameNode or Resource Manager using a specific
 * socket factory. Exercises both a direct DFS client and one configured
 * with a custom factory, then points a JobClient at a deliberately
 * shifted RM address.
 */
@Test public void testSocketFactory() throws IOException {
// Baseline cluster + filesystem using the default socket factory.
Configuration sconf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
final int nameNodePort=cluster.getNameNodePort();
FileSystem fs=cluster.getFileSystem();
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs=(DistributedFileSystem)fs;
// Second client for the same namenode, but via the custom socket config.
Configuration cconf=getCustomSocketConfigs(nameNodePort);
fs=FileSystem.get(cconf);
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs=(DistributedFileSystem)fs;
JobClient client=null;
MiniMRYarnCluster miniMRYarnCluster=null;
try {
// A directory created through one client must be visible through the other.
Path filePath=new Path("/dir");
Assert.assertFalse(directDfs.exists(filePath));
Assert.assertFalse(dfs.exists(filePath));
directDfs.mkdirs(filePath);
Assert.assertTrue(directDfs.exists(filePath));
Assert.assertTrue(dfs.exists(filePath));
// Bring up a YARN cluster bound to the baseline filesystem.
fs=FileSystem.get(sconf);
JobConf jobConf=new JobConf();
FileSystem.setDefaultUri(jobConf,fs.getUri().toString());
miniMRYarnCluster=initAndStartMiniMRYarnCluster(jobConf);
JobConf jconf=new JobConf(miniMRYarnCluster.getConfig());
jconf.set("hadoop.rpc.socket.factory.class.default","org.apache.hadoop.ipc.DummySocketFactory");
jconf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
// Shift the RM port by 10 — presumably DummySocketFactory redirects
// connections back to the real port; TODO confirm against its impl.
String rmAddress=jconf.get("yarn.resourcemanager.address");
String[] split=rmAddress.split(":");
jconf.set("yarn.resourcemanager.address",split[0] + ':' + (Integer.parseInt(split[1]) + 10));
client=new JobClient(jconf);
JobStatus[] jobs=client.jobsToComplete();
Assert.assertTrue(jobs.length == 0);
}
finally {
// Tear everything down regardless of assertion outcome.
closeClient(client);
closeDfs(dfs);
closeDfs(directDfs);
stopMiniMRYarnCluster(miniMRYarnCluster);
shutdownDFSCluster(cluster);
}
}
InternalCallVerifier BooleanVerifier
/** error2 must surface as a ServiceException wrapping the remote URISyntaxException. */
@Test(timeout=5000) public void testProtoBufRandomException() throws Exception {
  TestRpcService client=getClient();
  EmptyRequestProto emptyRequest=EmptyRequestProto.newBuilder().build();
  try {
    client.error2(null,emptyRequest);
    // BUGFIX: without this fail(), the test passed vacuously whenever
    // no exception was thrown at all.
    Assert.fail("Expected error2 to throw a ServiceException");
  }
  catch ( ServiceException se) {
    Assert.assertTrue(se.getCause() instanceof RemoteException);
    RemoteException re=(RemoteException)se.getCause();
    Assert.assertTrue(re.getClassName().equals(URISyntaxException.class.getName()));
    Assert.assertTrue(re.getMessage().contains("testException"));
    Assert.assertTrue(re.getErrorCode().equals(RpcErrorCodeProto.ERROR_APPLICATION));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * An insecure (SIMPLE-auth) client talking to a Kerberos-only server must
 * get an AccessControlException, both with the default reader thread count
 * and with multiple reader threads.
 */
@Test public void testErrorMsgForInsecureClient() throws IOException {
// Server side requires Kerberos; note setConfiguration mutates global
// UGI static state, so ordering of these calls matters.
Configuration serverConf=new Configuration(conf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,serverConf);
UserGroupInformation.setConfiguration(serverConf);
final Server server=new RPC.Builder(serverConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
// Switch UGI back to the insecure client configuration before connecting.
UserGroupInformation.setConfiguration(conf);
boolean succeeded=false;
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestProtocol proxy=null;
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
proxy.echo("");
}
catch ( RemoteException e) {
// Expected path: server rejects the simple-auth connection.
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded=true;
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
// Repeat with two reader threads to cover the multi-reader code path.
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,2);
UserGroupInformation.setConfiguration(serverConf);
final Server multiServer=new RPC.Builder(serverConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
multiServer.start();
succeeded=false;
final InetSocketAddress mulitServerAddr=NetUtils.getConnectAddress(multiServer);
proxy=null;
try {
UserGroupInformation.setConfiguration(conf);
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,mulitServerAddr,conf);
proxy.echo("");
}
catch ( RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded=true;
}
finally {
multiServer.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** RPC queue/processing metrics and quantile gauges must be populated under load. */
@Test public void testRpcMetrics() throws Exception {
  final Configuration metricsConf=new Configuration();
  final int interval=1;
  metricsConf.setBoolean(CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,true);
  metricsConf.set(CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval);
  final Server server=new RPC.Builder(metricsConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
  server.start();
  final TestProtocol proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,server.getListenerAddress(),metricsConf);
  try {
    // Drive enough traffic for the counters and quantiles to move.
    for (int call=0; call < 1000; call++) {
      proxy.ping();
      proxy.echo("" + call);
    }
    final MetricsRecordBuilder rpcMetrics=getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",getLongCounter("RpcQueueTimeNumOps",rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",getLongCounter("RpcProcessingTimeNumOps",rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",rpcMetrics);
  }
  finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** A slow RPC handled by one handler must not block fast calls on another. */
@Test public void testSlowRpc() throws IOException {
  System.out.println("Testing Slow RPC");
  // Two handlers: one gets tied up by the slow call, the other serves pings.
  Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
  TestProtocol proxy=null;
  try {
    server.start();
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
    SlowRPC slowrpc=new SlowRPC(proxy);
    Thread thread=new Thread(slowrpc,"SlowRPC");
    thread.start();
    // IMPROVED: assertFalse(msg, x) instead of assertTrue(msg, !x) —
    // same check, clearer intent and failure report.
    assertFalse("Slow RPC should not have finished1.",slowrpc.isDone());
    proxy.slowPing(false);
    assertFalse("Slow RPC should not have finished2.",slowrpc.isDone());
    proxy.slowPing(false);
    // Wait for the slow call to finish on its own.
    while (!slowrpc.isDone()) {
      System.out.println("Waiting for slow RPC to get done.");
      try {
        Thread.sleep(1000);
      }
      catch ( InterruptedException e) {
        // keep waiting; the loop condition controls termination
      }
    }
  }
  finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    System.out.println("Down slow rpc testing");
  }
}
BooleanVerifier
/**
 * Interrupting one of many concurrent RPC threads (the "leader") must not
 * leak exceptions into the other callers: only the interrupted thread may
 * fail, and every other thread's final call must still succeed.
 */
@Test(timeout=30000) public void testRPCInterrupted() throws IOException, InterruptedException {
final Configuration conf=new Configuration();
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(null).build();
server.start();
try {
int numConcurrentRPC=200;
InetSocketAddress addr=NetUtils.getConnectAddress(server);
// barrier: all threads start calling at once; latch: all threads finished
final CyclicBarrier barrier=new CyclicBarrier(numConcurrentRPC);
final CountDownLatch latch=new CountDownLatch(numConcurrentRPC);
final AtomicBoolean leaderRunning=new AtomicBoolean(true);
// records any exception seen by a non-leader thread (should stay null)
final AtomicReference error=new AtomicReference();
Thread leaderThread=null;
for (int i=0; i < numConcurrentRPC; i++) {
final int num=i;
final TestProtocol proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
Thread rpcThread=new Thread(new Runnable(){
@Override public void run(){
try {
barrier.await();
// Thread 0 (leader) loops until interrupted; the rest loop while
// the leader is alive, then make one final call that must succeed.
while (num == 0 || leaderRunning.get()) {
proxy.slowPing(false);
}
proxy.slowPing(false);
}
catch ( Exception e) {
if (num == 0) {
// the leader's failure is the expected interrupt
leaderRunning.set(false);
}
else {
// any other thread failing is a test failure
error.set(e);
}
LOG.error("thread " + num,e);
}
finally {
latch.countDown();
}
}
}
);
rpcThread.start();
if (leaderThread == null) {
leaderThread=rpcThread;
}
}
Thread.sleep(1000);
// Keep interrupting until the leader actually observes the interrupt.
while (leaderRunning.get()) {
leaderThread.interrupt();
}
latch.await();
assertTrue("rpc got exception " + error.get(),error.get() == null);
}
finally {
server.stop();
}
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that server.stop() properly stops all reader threads.
 */
@Test public void testStopsAllThreads() throws IOException, InterruptedException {
  assertEquals("Expect no Reader threads running before test",0,countThreads("Server$Listener$Reader"));
  final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
  server.start();
  try {
    // Poll for up to 5 seconds until at least one reader thread appears.
    long waitedMs=0;
    int readers;
    do {
      Thread.sleep(10);
      waitedMs+=10;
      readers=countThreads("Server$Listener$Reader");
    } while (readers == 0 && waitedMs < 5000);
    assertTrue(countThreads("Server$Listener$Reader") > 0);
  }
  finally {
    server.stop();
  }
  assertEquals("Expect no Reader threads left running after test",0,countThreads("Server$Listener$Reader"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
  TestImpl1 impl=new TestImpl1();
  server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl);
  server.start();
  ProtocolMetaInfoServerSideTranslatorPB xlator=new ProtocolMetaInfoServerSideTranslatorPB(server);
  // TestProtocol1 is not registered under protobuf RPC: no signatures.
  GetProtocolSignatureResponseProto resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER));
  Assert.assertEquals(0,resp.getProtocolSignatureCount());
  // ...but it is registered under writable RPC: exactly one signature.
  resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_WRITABLE));
  Assert.assertEquals(1,resp.getProtocolSignatureCount());
  ProtocolSignatureProto sig=resp.getProtocolSignatureList().get(0);
  Assert.assertEquals(TestProtocol1.versionID,sig.getVersion());
  // The signature must carry the fingerprint of echo(String).
  int expected=ProtocolSignature.getFingerprint(TestProtocol1.class.getMethod("echo",String.class));
  Assert.assertTrue(sig.getMethodsList().contains(expected));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Method fingerprints must differ across distinct signatures, match for
 * identical signatures on different protocols, and be order-independent
 * when hashing a set of methods.
 */
@Test public void testHashCode() throws Exception {
  final Method strEcho=TestProtocol3.class.getMethod("echo",String.class);
  final Method intEcho=TestProtocol3.class.getMethod("echo",int.class);
  final int strHash=ProtocolSignature.getFingerprint(strEcho);
  final int intHash=ProtocolSignature.getFingerprint(intEcho);
  // different parameter types => different fingerprints
  assertFalse(strHash == intHash);
  // same int signature on another protocol hashes identically
  assertEquals(intHash,ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",int.class)));
  // the String variant on the other protocol differs
  assertFalse(strHash == ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",String.class)));
  // different method name => different fingerprint
  assertFalse(intHash == ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo_alias",int.class)));
  // different arity => different fingerprint
  assertFalse(intHash == ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo",int.class,int.class)));
  // the fingerprint of a method array is independent of ordering
  assertEquals(ProtocolSignature.getFingerprint(new Method[]{intEcho,strEcho}),ProtocolSignature.getFingerprint(new Method[]{strEcho,intEcho}));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Calling a server that lacks the requested protocol version must raise VersionMismatch. */
@Test public void testVersionMismatch() throws IOException {
  server=new RPC.Builder(conf).setProtocol(TestProtocol2.class).setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr=NetUtils.getConnectAddress(server);
  TestProtocol4 proxy=RPC.getProxy(TestProtocol4.class,TestProtocol4.versionID,addr,conf);
  try {
    proxy.echo(21);
    fail("The call must throw VersionMismatch exception");
  }
  catch ( RemoteException ex) {
    // IMPROVED: assertEquals reports both values on failure, unlike
    // assertTrue(x.equals(y)); the checks themselves are unchanged.
    Assert.assertEquals(RPC.VersionMismatch.class.getName(),ex.getClassName());
    Assert.assertEquals(RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH,ex.getErrorCode());
  }
  catch ( IOException ex) {
    fail("Expected version mismatch but got " + ex);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** isMethodSupported must reflect the RPC kind the protocol was registered under. */
@Test public void testIsMethodSupported() throws IOException {
  server=new RPC.Builder(conf).setProtocol(TestProtocol2.class).setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr=NetUtils.getConnectAddress(server);
  final TestProtocol2 proxy=RPC.getProxy(TestProtocol2.class,TestProtocol2.versionID,addr,conf);
  final long version=RPC.getProtocolVersion(TestProtocol2.class);
  // Registered as a writable protocol: supported.
  Assert.assertTrue(RpcClientUtil.isMethodSupported(proxy,TestProtocol2.class,RPC.RpcKind.RPC_WRITABLE,version,"echo"));
  // Never registered for protobuf: unsupported.
  Assert.assertFalse(RpcClientUtil.isMethodSupported(proxy,TestProtocol2.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER,version,"echo"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A SecretManager that rejects tokens must propagate ERROR_MESSAGE as InvalidToken. */
@Test public void testErrorMessage() throws Exception {
  final BadTokenSecretManager sm=new BadTokenSecretManager();
  final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
  boolean sawExpectedFailure=false;
  try {
    doDigestRpc(server,sm);
  }
  catch ( RemoteException e) {
    LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
    assertEquals(ERROR_MESSAGE,e.getLocalizedMessage());
    assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
    sawExpectedFailure=true;
  }
  assertTrue(sawExpectedFailure);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Connection reuse must key on per-connection configuration: two proxies
 * built with identical conf share one cached connection, while changing
 * the max-idle-time setting forces a second connection.
 */
@Test public void testPerConnectionConf() throws Exception {
TestTokenSecretManager sm=new TestTokenSecretManager();
final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
// Token for the current user so the SASL handshake succeeds.
final UserGroupInformation current=UserGroupInformation.getCurrentUser();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()));
Token token=new Token(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
current.addToken(token);
Configuration newConf=new Configuration(conf);
newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,"");
Client client=null;
TestSaslProtocol proxy1=null;
TestSaslProtocol proxy2=null;
TestSaslProtocol proxy3=null;
// two distinct idle-time settings; only changing this should split connections
int timeouts[]={111222,3333333};
try {
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[0]);
proxy1=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy1.getAuthMethod();
client=WritableRpcEngine.getClient(newConf);
Set conns=client.getConnectionIds();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Same conf => the second proxy reuses the cached connection.
proxy2=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy2.getAuthMethod();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Different max-idle-time => a new connection id is created.
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[1]);
proxy3=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy3.getAuthMethod();
assertEquals("number of connections in cache is wrong",2,conns.size());
ConnectionId[] connsArray={RPC.getConnectionIdForProxy(proxy1),RPC.getConnectionIdForProxy(proxy2),RPC.getConnectionIdForProxy(proxy3)};
assertEquals(connsArray[0],connsArray[1]);
assertEquals(connsArray[0].getMaxIdleTime(),timeouts[0]);
assertFalse(connsArray[0].equals(connsArray[2]));
// NOTE(review): assertNotSame on autoboxed ints compares object identity,
// which passes for any two boxed values > 127 regardless of equality —
// this assertion is likely vacuous; verify whether assertEquals (or an
// inequality check) was intended here.
assertNotSame(connsArray[2].getMaxIdleTime(),timeouts[1]);
}
finally {
server.stop();
// Clear the shared connection cache so later tests start clean.
if (client != null) {
client.getConnectionIds().clear();
}
if (proxy1 != null) RPC.stopProxy(proxy1);
if (proxy2 != null) RPC.stopProxy(proxy2);
if (proxy3 != null) RPC.stopProxy(proxy3);
}
}
BooleanVerifier
/** Binding into a port range fully occupied by another socket must raise BindException. */
@Test public void testBindError() throws Exception {
  final Configuration conf=new Configuration();
  final ServerSocket occupier=new ServerSocket();
  occupier.bind(new InetSocketAddress("0.0.0.0",0));
  try {
    // Restrict the configured range to exactly the port we already hold.
    final int busyPort=occupier.getLocalPort();
    conf.set("TestRange",busyPort + "-" + busyPort);
    final ServerSocket victim=new ServerSocket();
    boolean caught=false;
    try {
      Server.bind(victim,new InetSocketAddress("0.0.0.0",0),10,conf,"TestRange");
    }
    catch ( BindException e) {
      caught=true;
    }
    finally {
      victim.close();
    }
    assertTrue("Failed to catch the expected bind exception",caught);
  }
  finally {
    occupier.close();
  }
}
APIUtilityVerifier BooleanVerifier
/** Server.bind must pick a free port inside the configured range. */
@Test public void testBind() throws Exception {
  final Configuration conf=new Configuration();
  final ServerSocket occupier=new ServerSocket();
  occupier.bind(new InetSocketAddress("0.0.0.0",0));
  try {
    // Range starts at a port we already hold, so bind must move past it.
    final int rangeLow=occupier.getLocalPort();
    final int rangeHigh=rangeLow + 100;
    conf.set("TestRange",rangeLow + "-" + rangeHigh);
    final ServerSocket bound=new ServerSocket();
    Server.bind(bound,new InetSocketAddress("0.0.0.0",0),10,conf,"TestRange");
    try {
      assertTrue(bound.isBound());
      assertTrue(bound.getLocalPort() > rangeLow);
      assertTrue(bound.getLocalPort() <= rangeHigh);
    }
    finally {
      bound.close();
    }
  }
  finally {
    occupier.close();
  }
}
BooleanVerifier
/** Server.bind on a wildcard address must leave the socket bound. */
@Test public void testBindSimple() throws Exception {
  final ServerSocket socket=new ServerSocket();
  Server.bind(socket,new InetSocketAddress("0.0.0.0",0),10);
  try {
    assertTrue(socket.isBound());
  }
  finally {
    socket.close();
  }
}
InternalCallVerifier BooleanVerifier
/** Exceptions registered singly or in bulk are terse; unregistered ones are not. */
@Test public void testExceptionsHandler(){
  final Server.ExceptionsHandler handler=new Server.ExceptionsHandler();
  handler.addTerseExceptions(IOException.class);
  handler.addTerseExceptions(RpcServerException.class,IpcException.class);
  for (Class<?> terse : new Class<?>[]{IOException.class,RpcServerException.class,IpcException.class}) {
    assertTrue(handler.isTerse(terse));
  }
  assertFalse(handler.isTerse(RpcClientException.class));
}
BooleanVerifier
/** An empty port-range setting must fall back to a plain ephemeral bind. */
@Test public void testEmptyConfig() throws Exception {
  final Configuration conf=new Configuration();
  conf.set("TestRange","");
  final ServerSocket socket=new ServerSocket();
  try {
    Server.bind(socket,new InetSocketAddress("0.0.0.0",0),10,conf,"TestRange");
    assertTrue(socket.isBound());
  }
  finally {
    socket.close();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** RunnableCallable must invoke a wrapped Callable from both run() and call(). */
@Test public void callable() throws Exception {
  C viaRun=new C();
  new RunnableCallable(viaRun).run();
  assertTrue(viaRun.RUN);
  C viaCall=new C();
  RunnableCallable wrapper=new RunnableCallable(viaCall);
  wrapper.call();
  assertTrue(viaCall.RUN);
  // toString delegates to the wrapped object's class name.
  assertEquals("C",wrapper.toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** RunnableCallable must invoke a wrapped Runnable from both run() and call(). */
@Test public void runnable() throws Exception {
  R viaRun=new R();
  new RunnableCallable(viaRun).run();
  assertTrue(viaRun.RUN);
  R viaCall=new R();
  RunnableCallable wrapper=new RunnableCallable(viaCall);
  wrapper.call();
  assertTrue(viaCall.RUN);
  // toString delegates to the wrapped object's class name.
  assertEquals("R",wrapper.toString());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * BaseService must expose its prefix, filter its service-scoped config
 * (other prefixes excluded), and run its init hook.
 */
@Test public void baseService() throws Exception {
  BaseService service=new MyService();
  assertNull(service.getInterface());
  // IMPROVED: expected value first, per JUnit convention — the checks are
  // unchanged, only failure messages become accurate.
  assertEquals("myservice",service.getPrefix());
  assertEquals(0,service.getServiceDependencies().length);
  Server server=Mockito.mock(Server.class);
  Configuration conf=new Configuration(false);
  conf.set("server.myservice.foo","FOO");
  // different prefix: must be filtered out of the service config
  conf.set("server.myservice1.bar","BAR");
  Mockito.when(server.getConfig()).thenReturn(conf);
  Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
  Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
  service.init(server);
  assertEquals("server.myservice.foo",service.getPrefixedName("foo"));
  assertEquals(1,service.getServiceConfig().size());
  assertEquals("FOO",service.getServiceConfig().get("foo"));
  assertTrue(MyService.INIT);
}
BooleanVerifier
/** A real status change must notify services via serverStatusChange. */
@Test @TestDir public void changeStatus() throws Exception {
  TestService.LIFECYCLE.clear();
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  final Server srv=createServer(cfg);
  srv.init();
  // Moving away from the post-init status must fire the callback.
  srv.setStatus(Server.Status.ADMIN);
  assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
}
BooleanVerifier
/** init must fail (S05) when server-site.xml exists but is a directory. */
@Test @TestException(exception=ServerException.class,msgRegExp="S05.*") @TestDir public void siteFileNotAFile() throws Exception {
  final String homeDir=TestDirHelper.getTestDir().getAbsolutePath();
  // Occupy the site-file path with a directory instead of a file.
  assertTrue(new File(homeDir,"server-site.xml").mkdir());
  new Server("server",homeDir,homeDir,homeDir,homeDir).init();
}
BooleanVerifier
/** init must fail (S01) when the conf directory is missing. */
@Test @TestException(exception=ServerException.class,msgRegExp="S01.*") @TestDir public void initNoConfigDir() throws Exception {
  final File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  // Create every required directory except "conf".
  for (String sub : new String[]{"log","temp"}) {
    assertTrue(new File(home,sub).mkdir());
  }
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),cfg).init();
}
BooleanVerifier
/** init must fail (S01) when the log directory is missing. */
@Test @TestException(exception=ServerException.class,msgRegExp="S01.*") @TestDir public void initNoLogDir() throws Exception {
  final File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  // Create every required directory except "log".
  for (String sub : new String[]{"conf","temp"}) {
    assertTrue(new File(home,sub).mkdir());
  }
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),cfg).init();
}
BooleanVerifier
/** Re-setting the current status must NOT fire serverStatusChange. */
@Test @TestDir public void setSameStatus() throws Exception {
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  final Server srv=createServer(cfg);
  srv.init();
  // Clear after init so only the no-op status set below is observed.
  TestService.LIFECYCLE.clear();
  srv.setStatus(srv.getStatus());
  assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
}
BooleanVerifier
/** init must fail (S02) when the log path exists but is a plain file. */
@Test @TestException(exception=ServerException.class,msgRegExp="S02.*") @TestDir public void initLogDirNotDir() throws Exception {
  final File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  for (String sub : new String[]{"conf","temp"}) {
    assertTrue(new File(home,sub).mkdir());
  }
  // Occupy the "log" path with an empty regular file.
  new FileOutputStream(new File(home,"log")).close();
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),cfg).init();
}
BooleanVerifier
/** init must fail (S01) when the temp directory is missing. */
@Test @TestException(exception=ServerException.class,msgRegExp="S01.*") @TestDir public void initNoTempDir() throws Exception {
  final File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  // Create every required directory except "temp".
  for (String sub : new String[]{"conf","log"}) {
    assertTrue(new File(home,sub).mkdir());
  }
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),cfg).init();
}
BooleanVerifier
/** init must fail (S02) when the temp path exists but is a plain file. */
@Test @TestException(exception=ServerException.class,msgRegExp="S02.*") @TestDir public void initTempDirNotDir() throws Exception {
  final File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  for (String sub : new String[]{"conf","log"}) {
    assertTrue(new File(home,sub).mkdir());
  }
  // Occupy the "temp" path with an empty regular file.
  new FileOutputStream(new File(home,"temp")).close();
  final Configuration cfg=new Configuration(false);
  cfg.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),cfg).init();
}
BooleanVerifier
@Test @TestException(exception=ServerException.class,msgRegExp="S02.*") @TestDir public void initConfigDirNotDir() throws Exception {
  // Build a home layout where "conf" is a regular file instead of a
  // directory; Server.init() must fail with error code S02.
  File home=new File(TestDirHelper.getTestDir(),"home");
  assertTrue(home.mkdir());
  assertTrue(new File(home,"log").mkdir());
  assertTrue(new File(home,"temp").mkdir());
  // Creating the file at the expected config-dir path is the fault injection.
  new FileOutputStream(new File(home,"conf")).close();
  Configuration serverConf=new Configuration(false);
  serverConf.set("server.services",TestService.class.getName());
  new Server("server",home.getAbsolutePath(),serverConf).init();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test @TestDir @SuppressWarnings("unchecked") public void sampling() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName()));
Configuration conf=new Configuration(false);
conf.set("server.services",services);
Server server=new Server("server",dir,dir,dir,dir,conf);
server.init();
Instrumentation instrumentation=server.get(Instrumentation.class);
final AtomicInteger count=new AtomicInteger();
Instrumentation.Variable varToSample=new Instrumentation.Variable(){
@Override public Long getValue(){
return (long)count.incrementAndGet();
}
}
;
instrumentation.addSampler("g","s",10,varToSample);
sleep(2000);
int i=count.get();
assertTrue(i > 0);
Map> snapshot=instrumentation.getSnapshot();
Map> samplers=(Map>)snapshot.get("samplers");
InstrumentationService.Sampler sampler=(InstrumentationService.Sampler)samplers.get("g").get("s");
assertTrue(sampler.getRate() > 0);
server.destroy();
}
BooleanVerifier NullVerifier HybridVerifier
@Test public void testMissingHostname() throws Exception {
  // With no remote address on the request, HostnameFilter should expose a
  // hostname containing "???" to the chain and clear its thread-local both
  // before and after doFilter().
  ServletRequest req=Mockito.mock(ServletRequest.class);
  Mockito.when(req.getRemoteAddr()).thenReturn(null);
  ServletResponse resp=Mockito.mock(ServletResponse.class);
  final AtomicBoolean chainCalled=new AtomicBoolean();
  FilterChain chain=new FilterChain(){
    @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
      assertTrue(HostnameFilter.get().contains("???"));
      chainCalled.set(true);
    }
  };
  Filter filter=new HostnameFilter();
  filter.init(null);
  assertNull(HostnameFilter.get());
  filter.doFilter(req,resp,chain);
  assertTrue(chainCalled.get());
  assertNull(HostnameFilter.get());
  filter.destroy();
}
BooleanVerifier NullVerifier HybridVerifier
@Test public void hostname() throws Exception {
  // For a "localhost" remote address, HostnameFilter should expose either
  // "localhost" or "127.0.0.1" to the chain and clear its thread-local both
  // before and after doFilter().
  ServletRequest req=Mockito.mock(ServletRequest.class);
  Mockito.when(req.getRemoteAddr()).thenReturn("localhost");
  ServletResponse resp=Mockito.mock(ServletResponse.class);
  final AtomicBoolean chainCalled=new AtomicBoolean();
  FilterChain chain=new FilterChain(){
    @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
      assertTrue(HostnameFilter.get().contains("localhost") || HostnameFilter.get().contains("127.0.0.1"));
      chainCalled.set(true);
    }
  };
  Filter filter=new HostnameFilter();
  filter.init(null);
  assertNull(HostnameFilter.get());
  filter.doFilter(req,resp,chain);
  assertTrue(chainCalled.get());
  assertNull(HostnameFilter.get());
  filter.destroy();
}
BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void mdc() throws Exception {
// Verifies that MDCFilter populates the SLF4J MDC keys "hostname", "user",
// "method" and "path" for the duration of the chain, clears them after
// doFilter() returns, and picks up the user principal and the hostname
// thread-local when they become available.
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getUserPrincipal()).thenReturn(null);
Mockito.when(request.getMethod()).thenReturn("METHOD");
Mockito.when(request.getPathInfo()).thenReturn("/pathinfo");
ServletResponse response=Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked=new AtomicBoolean();
// Phase 1: no principal, no hostname thread-local -> only method/path set.
FilterChain chain=new FilterChain(){
@Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
assertEquals(MDC.get("hostname"),null);
assertEquals(MDC.get("user"),null);
assertEquals(MDC.get("method"),"METHOD");
assertEquals(MDC.get("path"),"/pathinfo");
invoked.set(true);
}
}
;
MDC.clear();
Filter filter=new MDCFilter();
filter.init(null);
filter.doFilter(request,response,chain);
assertTrue(invoked.get());
// After the chain returns, the filter must have cleared all MDC keys.
assertNull(MDC.get("hostname"));
assertNull(MDC.get("user"));
assertNull(MDC.get("method"));
assertNull(MDC.get("path"));
// Phase 2: a user principal is now present -> "user" is populated.
Mockito.when(request.getUserPrincipal()).thenReturn(new Principal(){
@Override public String getName(){
return "name";
}
}
);
invoked.set(false);
chain=new FilterChain(){
@Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
assertEquals(MDC.get("hostname"),null);
assertEquals(MDC.get("user"),"name");
assertEquals(MDC.get("method"),"METHOD");
assertEquals(MDC.get("path"),"/pathinfo");
invoked.set(true);
}
}
;
filter.doFilter(request,response,chain);
assertTrue(invoked.get());
// Phase 3: hostname thread-local set -> "hostname" is populated too.
HostnameFilter.HOSTNAME_TL.set("HOST");
invoked.set(false);
chain=new FilterChain(){
@Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
assertEquals(MDC.get("hostname"),"HOST");
assertEquals(MDC.get("user"),"name");
assertEquals(MDC.get("method"),"METHOD");
assertEquals(MDC.get("path"),"/pathinfo");
invoked.set(true);
}
}
;
filter.doFilter(request,response,chain);
assertTrue(invoked.get());
// Remove the thread-local so later tests on this thread start clean.
HostnameFilter.HOSTNAME_TL.remove();
filter.destroy();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void test() throws Exception {
  // JSONMapProvider is writeable for Map types only, reports an unknown
  // (-1) size, and serializes a JSONObject to its JSON text.
  JSONMapProvider provider=new JSONMapProvider();
  assertTrue(provider.isWriteable(Map.class,null,null,null));
  assertFalse(provider.isWriteable(this.getClass(),null,null,null));
  assertEquals(provider.getSize(null,null,null,null,null),-1);
  JSONObject json=new JSONObject();
  json.put("a","A");
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  provider.writeTo(json,JSONObject.class,null,null,null,null,out);
  out.close();
  assertEquals(new String(out.toByteArray()).trim(),"{\"a\":\"A\"}");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void test() throws Exception {
  // JSONProvider is writeable for JSONObject only, reports an unknown (-1)
  // size, and serializes a JSONObject to its JSON text.
  JSONProvider provider=new JSONProvider();
  assertTrue(provider.isWriteable(JSONObject.class,null,null,null));
  assertFalse(provider.isWriteable(this.getClass(),null,null,null));
  assertEquals(provider.getSize(null,null,null,null,null),-1);
  JSONObject json=new JSONObject();
  json.put("a","A");
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  provider.writeTo(json,JSONObject.class,null,null,null,null,out);
  out.close();
  assertEquals(new String(out.toByteArray()).trim(),"{\"a\":\"A\"}");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNestedException() throws Throwable {
  // Serialize a nested exception through Log4Json and verify that the
  // resulting JSON event carries level, logger name, timestamp, exception
  // class, a stack-trace array and a formatted date.
  Exception e=new NoRouteToHostException("that box caught fire 3 years ago");
  Exception ioe=new IOException("Datacenter problems",e);
  ThrowableInformation ti=new ThrowableInformation(ioe);
  Log4Json l4j=new Log4Json();
  long timeStamp=Time.now();
  // Message/logger values include quotes, newlines and braces to exercise
  // JSON escaping.
  String outcome=l4j.toJson(new StringWriter(),"testNestedException",timeStamp,"INFO","quoted\"","new line\n and {}",ti).toString();
  println("testNestedException",outcome);
  ContainerNode rootNode=Log4Json.parse(outcome);
  assertEntryEquals(rootNode,Log4Json.LEVEL,"INFO");
  assertEntryEquals(rootNode,Log4Json.NAME,"testNestedException");
  assertEntryEquals(rootNode,Log4Json.TIME,timeStamp);
  assertEntryEquals(rootNode,Log4Json.EXCEPTION_CLASS,ioe.getClass().getName());
  JsonNode node=assertNodeContains(rootNode,Log4Json.STACK);
  assertTrue("Not an array: " + node,node.isArray());
  node=assertNodeContains(rootNode,Log4Json.DATE);
  assertTrue("Not a string: " + node,node.isTextual());
  String dateText=node.getTextValue();
  assertTrue("No '-' in " + dateText,dateText.contains("-"));
  // FIX: the second assertion checks for ':' but its failure message said
  // "No '-'"; corrected so a failure names the character actually tested.
  assertTrue("No ':' in " + dateText,dateText.contains(":"));
}
APIUtilityVerifier BooleanVerifier
@Test public void testShowJob() throws Exception {
// Verifies JobClient.displayJobList() output for one mocked RUNNING job,
// and that the listing is built purely from the given JobStatus objects
// without any per-job round trip to the cluster.
TestJobClient client=new TestJobClient(new JobConf());
long startTime=System.currentTimeMillis();
JobID jobID=new JobID(String.valueOf(startTime),12345);
// Stub every field the listing is expected to print.
JobStatus mockJobStatus=mock(JobStatus.class);
when(mockJobStatus.getJobID()).thenReturn(jobID);
when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
when(mockJobStatus.getStartTime()).thenReturn(startTime);
when(mockJobStatus.getUsername()).thenReturn("mockuser");
when(mockJobStatus.getQueue()).thenReturn("mockqueue");
when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
when(mockJobStatus.getUsedMem()).thenReturn(1024);
when(mockJobStatus.getReservedMem()).thenReturn(512);
when(mockJobStatus.getNeededMem()).thenReturn(2048);
when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");
// The Job/Cluster mocks exist only to prove they are NOT consulted.
Job mockJob=mock(Job.class);
when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(new TaskReport[5]);
Cluster mockCluster=mock(Cluster.class);
when(mockCluster.getJob(jobID)).thenReturn(mockJob);
client.setCluster(mockCluster);
ByteArrayOutputStream out=new ByteArrayOutputStream();
client.displayJobList(new JobStatus[]{mockJobStatus},new PrintWriter(out));
String commandLineOutput=out.toString();
System.out.println(commandLineOutput);
Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));
// getJobID may be read more than once while formatting; every other status
// getter is expected exactly once.
verify(mockJobStatus,atLeastOnce()).getJobID();
verify(mockJobStatus).getState();
verify(mockJobStatus).getStartTime();
verify(mockJobStatus).getUsername();
verify(mockJobStatus).getQueue();
verify(mockJobStatus).getPriority();
verify(mockJobStatus).getNumUsedSlots();
verify(mockJobStatus).getNumReservedSlots();
verify(mockJobStatus).getUsedMem();
verify(mockJobStatus).getReservedMem();
verify(mockJobStatus).getNeededMem();
verify(mockJobStatus).getSchedulingInfo();
// The listing must not fetch Job objects or task reports from the cluster.
verify(mockCluster,never()).getJob(jobID);
verify(mockJob,never()).getTaskReports(isA(TaskType.class));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRedirect() throws Exception {
// End-to-end redirect test: the client should fetch counters from the AM
// while it is up, tolerate an AM restart window, and fall back to the
// history server once the AM is gone for good. The amContact/hsContact/
// amRestarting flags are shared with the mock services (declared elsewhere
// in this class) and record which endpoint actually served each call.
Configuration conf=new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_ADDRESS,RMADDRESS);
conf.set(JHAdminConfig.MR_HISTORY_ADDRESS,HSHOSTADDRESS);
// Bring up mock RM, AM and history services.
RMService rmService=new RMService("test");
rmService.init(conf);
rmService.start();
AMService amService=new AMService();
amService.init(conf);
amService.start(conf);
HistoryService historyService=new HistoryService();
historyService.init(conf);
historyService.start(conf);
LOG.info("services started");
Cluster cluster=new Cluster(conf);
org.apache.hadoop.mapreduce.JobID jobID=new org.apache.hadoop.mapred.JobID("201103121733",1);
// While the AM is up, counters come from the AM.
org.apache.hadoop.mapreduce.Counters counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
LOG.info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately..");
Thread.sleep(5000);
amService.stop();
LOG.info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly..");
Thread.sleep(5000);
// Simulate an AM restart window: calls should still succeed but return
// empty counters, and the various job operations must not throw.
amRestarting=true;
counters=cluster.getJob(jobID).getCounters();
Assert.assertEquals(0,counters.countCounters());
Job job=cluster.getJob(jobID);
org.apache.hadoop.mapreduce.TaskID taskId=new org.apache.hadoop.mapreduce.TaskID(jobID,TaskType.MAP,0);
TaskAttemptID tId=new TaskAttemptID(taskId,0);
job.killJob();
job.killTask(tId);
job.failTask(tId);
job.getTaskCompletionEvents(0,100);
job.getStatus();
job.getTaskDiagnostics(tId);
job.getTaskReports(TaskType.MAP);
job.getTrackingURL();
amRestarting=false;
// Restart the AM: the client should contact it again.
amService=new AMService();
amService.init(conf);
amService.start(conf);
amContact=false;
counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
// With the AM stopped, the client must redirect to the history server.
amService.stop();
counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(hsContact);
rmService.stop();
historyService.stop();
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testRemoteExceptionFromHistoryServer() throws Exception {
  // An IOException raised by the history server should surface to the
  // caller (with its original message) once retries are exhausted.
  MRClientProtocol historyProxy=mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest())).thenThrow(new IOException("Job ID doesnot Exist"));
  ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
  when(rmDelegate.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null);
  ClientServiceDelegate delegate=getClientServiceDelegate(historyProxy,rmDelegate);
  try {
    delegate.getJobStatus(oldJobId);
    Assert.fail("Invoke should throw exception after retries.");
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("Job ID doesnot Exist"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("deprecation") @Test(timeout=1000) public void testGraylistedTrackers(){
  // The cluster status must report no graylisted trackers: an empty name
  // list and a zero count.
  Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
  Assert.assertEquals(0,clusterStatus.getGraylistedTrackers());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
  // CombineSequenceFileInputFormat must pack all files into a single
  // CombineFileSplit, and reading that split must yield every key exactly
  // once regardless of the requested split count.
  JobConf job=new JobConf(conf);
  Reporter reporter=Reporter.NULL;
  // Log the seed so a failing run can be reproduced.
  Random random=new Random();
  long seed=random.nextLong();
  LOG.info("seed = " + seed);
  random.setSeed(seed);
  localFs.delete(workDir,true);
  FileInputFormat.setInputPaths(job,workDir);
  final int length=10000;
  final int numFiles=10;
  createFiles(length,numFiles,random);
  // FIX: restored the erased generic type arguments; the raw "InputFormat"
  // and "RecordReader" declarations lose the IntWritable/BytesWritable
  // record typing.
  InputFormat<IntWritable,BytesWritable> format=new CombineSequenceFileInputFormat<IntWritable,BytesWritable>();
  IntWritable key=new IntWritable();
  BytesWritable value=new BytesWritable();
  for (int i=0; i < 3; i++) {
    int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
    LOG.info("splitting: requesting = " + numSplits);
    InputSplit[] splits=format.getSplits(job,numSplits);
    LOG.info("splitting: got = " + splits.length);
    assertEquals("We got more than one splits!",1,splits.length);
    InputSplit split=splits[0];
    assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
    // Track which keys have been seen to detect duplicates and gaps.
    BitSet bits=new BitSet(length);
    RecordReader<IntWritable,BytesWritable> reader=format.getRecordReader(split,job,reporter);
    try {
      while (reader.next(key,value)) {
        assertFalse("Key in multiple partitions.",bits.get(key.get()));
        bits.set(key.get());
      }
    }
    finally {
      reader.close();
    }
    assertEquals("Some keys in no partition.",length,bits.cardinality());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
  // CombineTextInputFormat must pack all files into a single
  // CombineFileSplit, and reading that split must yield every line value
  // exactly once regardless of the requested split count.
  JobConf job=new JobConf(defaultConf);
  // Log the seed so a failing run can be reproduced.
  Random random=new Random();
  long seed=random.nextLong();
  LOG.info("seed = " + seed);
  random.setSeed(seed);
  localFs.delete(workDir,true);
  FileInputFormat.setInputPaths(job,workDir);
  final int length=10000;
  final int numFiles=10;
  createFiles(length,numFiles,random);
  CombineTextInputFormat format=new CombineTextInputFormat();
  LongWritable key=new LongWritable();
  Text value=new Text();
  for (int i=0; i < 3; i++) {
    int numSplits=random.nextInt(length / 20) + 1;
    LOG.info("splitting: requesting = " + numSplits);
    InputSplit[] splits=format.getSplits(job,numSplits);
    LOG.info("splitting: got = " + splits.length);
    assertEquals("We got more than one splits!",1,splits.length);
    InputSplit split=splits[0];
    assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
    // Track which values have been seen to detect duplicates and gaps.
    BitSet bits=new BitSet(length);
    LOG.debug("split= " + split);
    // FIX: restored the erased generic type arguments on the raw
    // "RecordReader" declaration.
    RecordReader<LongWritable,Text> reader=format.getRecordReader(split,job,voidReporter);
    try {
      int count=0;
      while (reader.next(key,value)) {
        int v=Integer.parseInt(value.toString());
        LOG.debug("read " + v);
        if (bits.get(v)) {
          LOG.warn("conflict with " + v + " at position "+ reader.getPos());
        }
        assertFalse("Key in multiple partitions.",bits.get(v));
        bits.set(v);
        count++;
      }
      LOG.info("splits=" + split + " count="+ count);
    }
    finally {
      reader.close();
    }
    assertEquals("Some keys in no partition.",length,bits.cardinality());
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using the raw Inflater codec for reading gzip files.
 * Manually parses the gzip member header (RFC 1952) from a concatenated
 * gzip fixture, then inflates the first member's deflate payload with a
 * raw (nowrap) java.util.zip.Inflater.
 */
@Test public void testPrototypeInflaterGzip() throws IOException {
CompressionCodec gzip=new GzipCodec();
localFs.delete(workDir,true);
System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " + "non-native/Java Inflater and manual gzip header/trailer parsing"+ COLOR_NORMAL);
// Fixture: a concatenated gzip file expected to be exactly 148 bytes.
final String fn="concat" + gzip.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
final FileInputStream in=new FileInputStream(fnLocal.toString());
assertEquals("concat bytes available",148,in.available());
// Fixed 10-byte gzip header: magic 0x1f 0x8b, then CM=8 (deflate).
byte[] compressedBuf=new byte[256];
int numBytesRead=in.read(compressedBuf,0,10);
assertEquals("header bytes read",10,numBytesRead);
assertEquals("1st byte",0x1f,compressedBuf[0] & 0xff);
assertEquals("2nd byte",0x8b,compressedBuf[1] & 0xff);
assertEquals("3rd byte (compression method)",8,compressedBuf[2] & 0xff);
// FLG byte: optional fields follow, each guarded by its flag bit.
byte flags=(byte)(compressedBuf[3] & 0xff);
if ((flags & 0x04) != 0) {
// FEXTRA: 2-byte little-endian length, then that many bytes to skip.
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("XLEN bytes read",2,numBytesRead);
int xlen=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
in.skip(xlen);
}
if ((flags & 0x08) != 0) {
// FNAME: zero-terminated original filename.
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading filename",numBytesRead == -1);
}
}
if ((flags & 0x10) != 0) {
// FCOMMENT: zero-terminated comment string.
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading comment",numBytesRead == -1);
}
}
if ((flags & 0xe0) != 0) {
// Reserved bits must be zero; this assert fires iff the guard is true.
assertTrue("reserved bits are set??",(flags & 0xe0) == 0);
}
if ((flags & 0x02) != 0) {
// FHCRC: 2-byte header CRC16. The bytes must be consumed; the value
// itself is intentionally unused here.
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("CRC16 bytes read",2,numBytesRead);
int crc16=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
}
// Remaining bytes are the deflate payload (plus trailer/following members);
// inflate with nowrap=true since the gzip wrapper was parsed by hand.
numBytesRead=in.read(compressedBuf);
byte[] uncompressedBuf=new byte[256];
Inflater inflater=new Inflater(true);
inflater.setInput(compressedBuf,0,numBytesRead);
try {
int numBytesUncompressed=inflater.inflate(uncompressedBuf);
String outString=new String(uncompressedBuf,0,numBytesUncompressed,"UTF-8");
System.out.println("uncompressed data of first gzip member = [" + outString + "]");
}
catch ( java.util.zip.DataFormatException ex) {
throw new IOException(ex.getMessage());
}
in.close();
}
InternalCallVerifier BooleanVerifier
@Test public void testFileSystemGroupIteratorConcurrency(){
  // Adding new filesystem counters while walking the group must not
  // invalidate an iterator obtained earlier.
  Counters counters=new Counters();
  counters.findCounter("fs1",FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2",FileSystemCounter.BYTES_READ).increment(1);
  Group fsGroup=counters.getGroup(FileSystemCounter.class.getName());
  Iterator it=fsGroup.iterator();
  // Mutate between hasNext()/next() calls.
  counters.findCounter("fs3",FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(it.hasNext());
  it.next();
  counters.findCounter("fs3",FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(it.hasNext());
  it.next();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMakeCompactString(){
  // makeCompactString() renders counters as "group.counter:value" joined
  // by commas; with two groups the ordering is unspecified, so both
  // permutations are accepted.
  final String GC1="group1.counter1:1";
  final String GC2="group2.counter2:3";
  Counters counters=new Counters();
  counters.incrCounter("group1","counter1",1);
  // FIX: use the GC1 constant instead of repeating its literal, matching
  // how GC2 is used below.
  assertEquals(GC1,counters.makeCompactString());
  counters.incrCounter("group2","counter2",3);
  String cs=counters.makeCompactString();
  assertTrue("Bad compact string",cs.equals(GC1 + ',' + GC2) || cs.equals(GC2 + ',' + GC1));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSplitLocationInfo() throws Exception {
  // The test:// filesystem reports two split locations: "localhost" is
  // both on-disk and in-memory, the other host is on-disk only.
  Configuration conf=getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2");
  JobConf jobConf=new JobConf(conf);
  TextInputFormat inputFormat=new TextInputFormat();
  inputFormat.configure(jobConf);
  FileSplit[] splits=(FileSplit[])inputFormat.getSplits(jobConf,1);
  String[] hosts=splits[0].getLocations();
  Assert.assertEquals(2,hosts.length);
  SplitLocationInfo[] infos=splits[0].getLocationInfo();
  Assert.assertEquals(2,infos.length);
  // Match each location info to its host by name.
  SplitLocationInfo localInfo;
  SplitLocationInfo otherInfo;
  if (hosts[0].equals("localhost")) {
    localInfo=infos[0];
  } else {
    localInfo=infos[1];
  }
  if (hosts[0].equals("otherhost")) {
    otherInfo=infos[0];
  } else {
    otherInfo=infos[1];
  }
  Assert.assertTrue(localInfo.isOnDisk());
  Assert.assertTrue(localInfo.isInMemory());
  Assert.assertTrue(otherInfo.isOnDisk());
  Assert.assertFalse(otherInfo.isInMemory());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatusErrorOnNonExistantDir() throws IOException {
  // listStatus() over a missing input path must fail with an
  // InvalidInputException that names the missing (qualified) path.
  Configuration conf=new Configuration();
  conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
  org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestErrorOnNonExistantDir(conf,localFs);
  JobConf jobConf=new JobConf(conf);
  TextInputFormat inputFormat=new TextInputFormat();
  inputFormat.configure(jobConf);
  try {
    inputFormat.listStatus(jobConf);
    Assert.fail("Expecting an IOException for a missing Input path");
  }
  catch ( IOException e) {
    Path missing=localFs.makeQualified(new Path(TEST_ROOT_DIR,"input2"));
    Assert.assertTrue(e instanceof InvalidInputException);
    Assert.assertEquals("Input path does not exist: " + missing.toString(),e.getMessage());
  }
}
BooleanVerifier
/**
 * Test with record length set to a negative value.
 * FixedLengthInputFormat must reject a negative record length by throwing
 * an IOException when a record reader is created.
 */
@Test(timeout=5000) public void testNegativeRecordLength() throws IOException {
  localFs.delete(workDir,true);
  // FIX: dropped the redundant "new String(...)" wrapper around the
  // literal and used Java-style array declaration for the splits below.
  Path file=new Path(workDir,"testFormat.txt");
  createFile(file,null,10,10);
  JobConf job=new JobConf(defaultConf);
  FileInputFormat.setInputPaths(job,workDir);
  FixedLengthInputFormat format=new FixedLengthInputFormat();
  format.setRecordLength(job,-10);
  format.configure(job);
  InputSplit[] splits=format.getSplits(job,1);
  boolean exceptionThrown=false;
  for ( InputSplit split : splits) {
    try {
      RecordReader reader=format.getRecordReader(split,job,voidReporter);
    }
    catch ( IOException ioe) {
      exceptionThrown=true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for negative record length:",exceptionThrown);
}
BooleanVerifier
/**
 * Test with record length set to 0.
 * FixedLengthInputFormat must reject a zero record length by throwing an
 * IOException when a record reader is created.
 */
@Test(timeout=5000) public void testZeroRecordLength() throws IOException {
  localFs.delete(workDir,true);
  // FIX: dropped the redundant "new String(...)" wrapper around the
  // literal and used Java-style array declaration for the splits below.
  Path file=new Path(workDir,"testFormat.txt");
  createFile(file,null,10,10);
  JobConf job=new JobConf(defaultConf);
  FileInputFormat.setInputPaths(job,workDir);
  FixedLengthInputFormat format=new FixedLengthInputFormat();
  format.setRecordLength(job,0);
  format.configure(job);
  InputSplit[] splits=format.getSplits(job,1);
  boolean exceptionThrown=false;
  for ( InputSplit split : splits) {
    try {
      RecordReader reader=format.getRecordReader(split,job,voidReporter);
    }
    catch ( IOException ioe) {
      exceptionThrown=true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for zero record length:",exceptionThrown);
}
BooleanVerifier
/**
 * Test with no record length set.
 * FixedLengthInputFormat must fail with an IOException when a record
 * reader is created without a configured record length.
 */
@Test(timeout=5000) public void testNoRecordLength() throws IOException {
  localFs.delete(workDir,true);
  // FIX: dropped the redundant "new String(...)" wrapper around the
  // literal and used Java-style array declaration for the splits below.
  Path file=new Path(workDir,"testFormat.txt");
  createFile(file,null,10,10);
  JobConf job=new JobConf(defaultConf);
  FileInputFormat.setInputPaths(job,workDir);
  FixedLengthInputFormat format=new FixedLengthInputFormat();
  format.configure(job);
  InputSplit[] splits=format.getSplits(job,1);
  boolean exceptionThrown=false;
  for ( InputSplit split : splits) {
    try {
      RecordReader reader=format.getRecordReader(split,job,voidReporter);
    }
    catch ( IOException ioe) {
      exceptionThrown=true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for not setting record length:",exceptionThrown);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testClusterNoAdmins(){
  // With an empty view ACL, MR ACLs enabled and no cluster admins, only
  // the job owner may view or modify the job.
  // FIX: restored the erased generic type arguments on the raw Map/HashMap
  // declarations (JobACLsManager.constructJobACLs returns
  // Map<JobACL,AccessControlList>).
  Map<JobACL,AccessControlList> tmpJobACLs=new HashMap<JobACL,AccessControlList>();
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),"");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String noAdminUser="testuser2";
  JobACLsManager aclsManager=new JobACLsManager(conf);
  tmpJobACLs=aclsManager.constructJobACLs(conf);
  final Map<JobACL,AccessControlList> jobACLs=tmpJobACLs;
  UserGroupInformation callerUGI=UserGroupInformation.createUserForTesting(noAdminUser,new String[]{});
  boolean val=aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB));
  assertFalse("random user should not have view access",val);
  val=aclsManager.checkAccess(callerUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB));
  assertFalse("random user should not have modify access",val);
  callerUGI=UserGroupInformation.createUserForTesting(jobOwner,new String[]{});
  val=aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("owner should have view access",val);
  val=aclsManager.checkAccess(callerUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB));
  assertTrue("owner should have modify access",val);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testClusterAdmins(){
  // A configured cluster admin gets both view and modify access even
  // though the job ACLs only name the owner.
  // FIX: restored the erased generic type arguments on the raw Map/HashMap
  // declarations (constructJobACLs returns Map<JobACL,AccessControlList>).
  Map<JobACL,AccessControlList> tmpJobACLs=new HashMap<JobACL,AccessControlList>();
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.set(JobACL.MODIFY_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String clusterAdmin="testuser2";
  conf.set(MRConfig.MR_ADMINS,clusterAdmin);
  JobACLsManager aclsManager=new JobACLsManager(conf);
  tmpJobACLs=aclsManager.constructJobACLs(conf);
  final Map<JobACL,AccessControlList> jobACLs=tmpJobACLs;
  UserGroupInformation callerUGI=UserGroupInformation.createUserForTesting(clusterAdmin,new String[]{});
  boolean val=aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("cluster admin should have view access",val);
  val=aclsManager.checkAccess(callerUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB));
  assertTrue("cluster admin should have modify access",val);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testAclsOff(){
  // With MR ACLs disabled, any user passes the access check regardless of
  // the configured view ACL.
  // FIX: restored the erased generic type arguments on the raw Map/HashMap
  // declarations (constructJobACLs returns Map<JobACL,AccessControlList>).
  Map<JobACL,AccessControlList> tmpJobACLs=new HashMap<JobACL,AccessControlList>();
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,false);
  String noAdminUser="testuser2";
  JobACLsManager aclsManager=new JobACLsManager(conf);
  tmpJobACLs=aclsManager.constructJobACLs(conf);
  final Map<JobACL,AccessControlList> jobACLs=tmpJobACLs;
  UserGroupInformation callerUGI=UserGroupInformation.createUserForTesting(noAdminUser,new String[]{});
  boolean val=aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("acls off so anyone should have access",val);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testGroups(){
  // A user who belongs to the configured admin GROUP (note the leading
  // space in MR_ADMINS: "<users> <groups>") gets access.
  // FIX: restored the erased generic type arguments on the raw Map/HashMap
  // declarations (constructJobACLs returns Map<JobACL,AccessControlList>).
  Map<JobACL,AccessControlList> tmpJobACLs=new HashMap<JobACL,AccessControlList>();
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String user="testuser2";
  String adminGroup="adminGroup";
  conf.set(MRConfig.MR_ADMINS," " + adminGroup);
  JobACLsManager aclsManager=new JobACLsManager(conf);
  tmpJobACLs=aclsManager.constructJobACLs(conf);
  final Map<JobACL,AccessControlList> jobACLs=tmpJobACLs;
  UserGroupInformation callerUGI=UserGroupInformation.createUserForTesting(user,new String[]{adminGroup});
  boolean val=aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("user in admin group should have access",val);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testIsJobDirValid() throws IOException {
  // A job directory is valid only once it contains both job.xml and
  // job.split.
  Configuration conf=new Configuration();
  FileSystem lfs=FileSystem.getLocal(conf);
  Path jobDir=new Path(TEST_DIR);
  lfs.mkdirs(jobDir);
  Assert.assertFalse(JobClient.isJobDirValid(jobDir,lfs));
  Path confFile=new Path(jobDir,"job.xml");
  Path splitFile=new Path(jobDir,"job.split");
  lfs.create(confFile);
  lfs.create(splitFile);
  Assert.assertTrue(JobClient.isJobDirValid(jobDir,lfs));
  // Clean up so later tests see an empty directory again.
  lfs.delete(confFile,true);
  lfs.delete(splitFile,true);
}
BooleanVerifier
@Test(timeout=10000) public void testGetStagingAreaDir() throws IOException, InterruptedException {
  // JobClient.getStagingAreaDir() must agree with the cluster handle's
  // staging directory.
  Configuration conf=new Configuration();
  JobClient client=new JobClient(conf);
  // FIX: use assertEquals instead of assertTrue(a.equals(b)) so a failure
  // reports both path strings, not just "Mismatch in paths".
  Assert.assertEquals("Mismatch in paths",client.getClusterHandle().getStagingAreaDir().toString(),client.getStagingAreaDir().toString());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test getters and setters of JobConf.
 * Walks through default values, sets each property, and re-reads it to
 * confirm the setter/getter pair round-trips.
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobConf(){
JobConf conf=new JobConf();
// Jar unpack pattern default.
Pattern pattern=conf.getJarUnpackPattern();
assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),pattern.toString());
// Failed-task file retention.
assertFalse(conf.getKeepFailedTaskFiles());
conf.setKeepFailedTaskFiles(true);
assertTrue(conf.getKeepFailedTaskFiles());
assertNull(conf.getKeepTaskFilesPattern());
conf.setKeepTaskFilesPattern("123454");
assertEquals("123454",conf.getKeepTaskFilesPattern());
// Working directory.
assertNotNull(conf.getWorkingDirectory());
conf.setWorkingDirectory(new Path("test"));
assertTrue(conf.getWorkingDirectory().toString().endsWith("test"));
assertEquals(1,conf.getNumTasksToExecutePerJvm());
// Key-field comparator and new-reducer API flags.
assertNull(conf.getKeyFieldComparatorOption());
conf.setKeyFieldComparatorOptions("keySpec");
assertEquals("keySpec",conf.getKeyFieldComparatorOption());
assertFalse(conf.getUseNewReducer());
conf.setUseNewReducer(true);
assertTrue(conf.getUseNewReducer());
// Speculative execution: the combined flag stays true while either the
// map or the reduce side is still speculative.
assertTrue(conf.getMapSpeculativeExecution());
assertTrue(conf.getReduceSpeculativeExecution());
assertTrue(conf.getSpeculativeExecution());
conf.setReduceSpeculativeExecution(false);
assertTrue(conf.getSpeculativeExecution());
conf.setMapSpeculativeExecution(false);
assertFalse(conf.getSpeculativeExecution());
assertFalse(conf.getMapSpeculativeExecution());
assertFalse(conf.getReduceSpeculativeExecution());
conf.setSessionId("ses");
assertEquals("ses",conf.getSessionId());
// Failure thresholds.
assertEquals(3,conf.getMaxTaskFailuresPerTracker());
conf.setMaxTaskFailuresPerTracker(2);
assertEquals(2,conf.getMaxTaskFailuresPerTracker());
assertEquals(0,conf.getMaxMapTaskFailuresPercent());
conf.setMaxMapTaskFailuresPercent(50);
assertEquals(50,conf.getMaxMapTaskFailuresPercent());
assertEquals(0,conf.getMaxReduceTaskFailuresPercent());
conf.setMaxReduceTaskFailuresPercent(70);
assertEquals(70,conf.getMaxReduceTaskFailuresPercent());
// Job priority.
assertEquals(JobPriority.NORMAL.name(),conf.getJobPriority().name());
conf.setJobPriority(JobPriority.HIGH);
assertEquals(JobPriority.HIGH.name(),conf.getJobPriority().name());
// Submit host metadata.
assertNull(conf.getJobSubmitHostName());
conf.setJobSubmitHostName("hostname");
assertEquals("hostname",conf.getJobSubmitHostName());
assertNull(conf.getJobSubmitHostAddress());
conf.setJobSubmitHostAddress("ww");
assertEquals("ww",conf.getJobSubmitHostAddress());
// Profiling: setting the map-task range (true) must not affect the
// reduce-task range (false).
assertFalse(conf.getProfileEnabled());
conf.setProfileEnabled(true);
assertTrue(conf.getProfileEnabled());
assertEquals(conf.getProfileTaskRange(true).toString(),"0-2");
assertEquals(conf.getProfileTaskRange(false).toString(),"0-2");
conf.setProfileTaskRange(true,"0-3");
assertEquals(conf.getProfileTaskRange(false).toString(),"0-2");
assertEquals(conf.getProfileTaskRange(true).toString(),"0-3");
// Debug scripts.
assertNull(conf.getMapDebugScript());
conf.setMapDebugScript("mDbgScript");
assertEquals("mDbgScript",conf.getMapDebugScript());
assertNull(conf.getReduceDebugScript());
conf.setReduceDebugScript("rDbgScript");
assertEquals("rDbgScript",conf.getReduceDebugScript());
assertNull(conf.getJobLocalDir());
// Queue name and task memory settings.
assertEquals("default",conf.getQueueName());
conf.setQueueName("qname");
assertEquals("qname",conf.getQueueName());
conf.setMemoryForMapTask(100 * 1000);
assertEquals(100 * 1000,conf.getMemoryForMapTask());
conf.setMemoryForReduceTask(1000 * 1000);
assertEquals(1000 * 1000,conf.getMemoryForReduceTask());
assertEquals(-1,conf.getMaxPhysicalMemoryForTask());
assertEquals("The variable key is no longer used.",JobConf.deprecatedString("key"));
// JVM opts must not be set by default (MAPREDUCE-5653 style check).
assertEquals("mapreduce.map.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS));
assertEquals("mapreduce.reduce.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS));
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the shared word-count job with all five input files present and
 * IO_SORT_FACTOR=3, then validates the aggregate task counters and the
 * file-byte counters against the actual on-disk input size.
 */
@Test public void testNewCounterC() throws Exception {
final Job job=createJob();
final Configuration jobConf=job.getConfiguration();
// Constrain the merge fan-in so the sort performs multi-pass merges.
jobConf.setInt(JobContext.IO_SORT_FACTOR,3);
// Ensure input files 3 and 4 exist for this run (other tests remove them).
createWordsFile(inFiles[3],jobConf);
createWordsFile(inFiles[4],jobConf);
// Sum the sizes of all five input files for the file-counter validation.
long totalInputBytes=0;
for (int idx=0; idx < 5; idx++) {
totalInputBytes+=getFileSize(inFiles[idx]);
}
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN2"));
assertTrue(job.waitForCompletion(true));
// Downgrade to the old-API Counters type expected by the validators.
final Counters counters=Counters.downgrade(job.getCounters());
validateCounters(counters,122880,25600,102400);
validateFileCounters(counters,totalInputBytes,0,0,0);
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the shared word-count job as a map-only job (zero reducers) over the
 * first three input files with IO_SORT_FACTOR=2, then validates the task
 * counters; reduce-side file counters are passed as -1 (not applicable).
 */
@Test public void testNewCounterD() throws Exception {
final Job job=createJob();
final Configuration jobConf=job.getConfiguration();
jobConf.setInt(JobContext.IO_SORT_FACTOR,2);
// Map-only job: skip the reduce phase entirely.
job.setNumReduceTasks(0);
// Drop input files 3 and 4 so only the first three are processed.
removeWordsFile(inFiles[3],jobConf);
removeWordsFile(inFiles[4],jobConf);
// Sum the sizes of the three remaining input files.
long totalInputBytes=0;
for (int idx=0; idx < 3; idx++) {
totalInputBytes+=getFileSize(inFiles[idx]);
}
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN3"));
assertTrue(job.waitForCompletion(true));
// Downgrade to the old-API Counters type expected by the validators.
final Counters counters=Counters.downgrade(job.getCounters());
validateCounters(counters,0,15360,61440);
validateFileCounters(counters,totalInputBytes,0,-1,-1);
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the shared word-count job over the first three input files with
 * IO_SORT_FACTOR=2, then validates the aggregate task counters and the
 * file-byte counters against the actual on-disk input size.
 */
@Test public void testNewCounterA() throws Exception {
final Job job=createJob();
final Configuration jobConf=job.getConfiguration();
jobConf.setInt(JobContext.IO_SORT_FACTOR,2);
// Drop input files 3 and 4 so only the first three are processed.
removeWordsFile(inFiles[3],jobConf);
removeWordsFile(inFiles[4],jobConf);
// Sum the sizes of the three remaining input files.
long totalInputBytes=0;
for (int idx=0; idx < 3; idx++) {
totalInputBytes+=getFileSize(inFiles[idx]);
}
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN0"));
assertTrue(job.waitForCompletion(true));
// Downgrade to the old-API Counters type expected by the validators.
final Counters counters=Counters.downgrade(job.getCounters());
validateCounters(counters,73728,15360,61440);
validateFileCounters(counters,totalInputBytes,0,0,0);
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the shared word-count job over four input files (file 4 removed)
 * with IO_SORT_FACTOR=2, then validates the aggregate task counters and
 * the file-byte counters against the actual on-disk input size.
 */
@Test public void testNewCounterB() throws Exception {
final Job job=createJob();
final Configuration jobConf=job.getConfiguration();
jobConf.setInt(JobContext.IO_SORT_FACTOR,2);
// Ensure file 3 exists and file 4 does not: four inputs for this run.
createWordsFile(inFiles[3],jobConf);
removeWordsFile(inFiles[4],jobConf);
// Sum the sizes of the four input files.
long totalInputBytes=0;
for (int idx=0; idx < 4; idx++) {
totalInputBytes+=getFileSize(inFiles[idx]);
}
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN1"));
assertTrue(job.waitForCompletion(true));
// Downgrade to the old-API Counters type expected by the validators.
final Counters counters=Counters.downgrade(job.getCounters());
validateCounters(counters,98304,20480,81920);
validateFileCounters(counters,totalInputBytes,0,0,0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}.
 * The test consists of running a low-memory job which consumes less heap
 * memory and then running a high-memory job which consumes more heap memory,
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
 * than that of the high-memory job.
 * @throws IOException
 */
@Test @SuppressWarnings("deprecation") public void testHeapUsageCounter() throws Exception {
JobConf conf=new JobConf();
FileSystem fileSystem=FileSystem.getLocal(conf);
// Work under test.build.data (or /tmp) in a fresh per-test directory.
Path rootDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testRootDir=new Path(rootDir,"testHeapUsageCounter");
fileSystem.delete(testRootDir,true);
fileSystem.setWorkingDirectory(testRootDir);
fileSystem.deleteOnExit(testRootDir);
// Single-tracker mini cluster backed by the local file system.
MiniMRCluster mrCluster=new MiniMRCluster(1,fileSystem.getUri().toString(),1);
try {
// Replace the local conf with the cluster's conf so jobs run on the mini cluster.
conf=mrCluster.createJobConf();
JobClient jobClient=new JobClient(conf);
Path inDir=new Path(testRootDir,"in");
createWordsFile(inDir,conf);
// Job 1: baseline job; the 0,0 targets mean no extra heap is consumed.
RunningJob lowMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",0,0,fileSystem,jobClient,inDir);
JobID lowMemJobID=lowMemJob.getID();
long lowMemJobMapHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.MAP);
System.out.println("Job1 (low memory job) map task heap usage: " + lowMemJobMapHeapUsage);
long lowMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job1 (low memory job) reduce task heap usage: " + lowMemJobReduceHeapUsage);
// Job 2: target heap usage 256MB above what job 1 actually reported,
// so its COMMITTED_HEAP_BYTES must come out strictly larger.
RunningJob highMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",lowMemJobMapHeapUsage + 256 * 1024 * 1024,lowMemJobReduceHeapUsage + 256 * 1024 * 1024,fileSystem,jobClient,inDir);
JobID highMemJobID=highMemJob.getID();
long highMemJobMapHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.MAP);
System.out.println("Job2 (high memory job) map task heap usage: " + highMemJobMapHeapUsage);
long highMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job2 (high memory job) reduce task heap usage: " + highMemJobReduceHeapUsage);
assertTrue("Incorrect map heap usage reported by the map task",lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
assertTrue("Incorrect reduce heap usage reported by the reduce task",lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
}
finally {
mrCluster.shutdown();
try {
fileSystem.delete(testRootDir,true);
}
catch ( IOException ioe) {
// Best-effort cleanup only; a failed delete must not mask the test result.
}
}
}
BooleanVerifier
/**
 * Test that print job queue recursively prints child queues.
 *
 * <p>Builds a three-level queue hierarchy (parent -> child -> grandchild),
 * prints the parent via {@link JobQueueClient#printJobQueueInfo} and asserts
 * that the grandchild's name appears in the captured output, proving the
 * printer recurses through all descendants.
 */
@Test @SuppressWarnings("deprecation") public void testPrintJobQueueInfo() throws IOException {
JobQueueClient queueClient=new JobQueueClient();
JobQueueInfo parent=new JobQueueInfo();
JobQueueInfo child=new JobQueueInfo();
JobQueueInfo grandChild=new JobQueueInfo();
child.addChild(grandChild);
parent.addChild(child);
// Only the grandchild gets a name; it is the marker we search for below.
grandChild.setQueueName("GrandChildQueue");
ByteArrayOutputStream bbos=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(bbos);
queueClient.printJobQueueInfo(parent,writer);
// PrintWriter buffers and does not auto-flush: push any pending output
// into the byte stream before inspecting it, or the assertion may read
// an empty/partial buffer.
writer.flush();
Assert.assertTrue("printJobQueueInfo did not print grandchild's name",bbos.toString().contains("GrandChildQueue"));
}
BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies that LineRecordReader strips the UTF-8 byte-order mark from the
 * first line of a file: reads testBOM.txt (a classpath resource) and fails
 * if the first record still starts with the BOM character.
 */
@Test public void testStripBOM() throws IOException {
// U+FEFF is the UTF-8 BOM as it appears once decoded into a String.
String UTF8_BOM="\uFEFF";
URL testFileUrl=getClass().getClassLoader().getResource("testBOM.txt");
assertNotNull("Cannot find testBOM.txt",testFileUrl);
File testFile=new File(testFileUrl.getFile());
Path testFilePath=new Path(testFile.getAbsolutePath());
long testFileSize=testFile.length();
Configuration conf=new Configuration();
conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,Integer.MAX_VALUE);
// One split covering the whole file, starting at offset 0 where the BOM sits.
FileSplit split=new FileSplit(testFilePath,0,testFileSize,(String[])null);
LineRecordReader reader=new LineRecordReader(conf,split);
LongWritable key=new LongWritable();
Text value=new Text();
int numRecords=0;
boolean firstLine=true;
// Assume the BOM was skipped; flip to false if the first record disproves it.
boolean skipBOM=true;
while (reader.next(key,value)) {
if (firstLine) {
firstLine=false;
if (value.toString().startsWith(UTF8_BOM)) {
skipBOM=false;
}
}
++numRecords;
}
reader.close();
// numRecords is counted but not asserted; only BOM handling is under test.
assertTrue("BOM is not skipped",skipBOM);
}
BooleanVerifier
/**
 * Verifies that LocalDistributedCacheManager tolerates the same cache file
 * being registered twice: the duplicate entry must not break localization,
 * the "link" symlink must exist while the manager is set up, and it must be
 * removed when the manager is closed.
 */
@Test public void testDuplicateDownload() throws Exception {
JobConf conf=new JobConf();
// Route the mock:// scheme to the test's MockFileSystem implementation.
conf.setClass("fs.mock.impl",MockFileSystem.class,FileSystem.class);
URI mockBase=new URI("mock://test-nn1/");
when(mockfs.getUri()).thenReturn(mockBase);
Path working=new Path("mock://test-nn1/user/me/");
when(mockfs.getWorkingDirectory()).thenReturn(working);
// resolvePath is an identity function on the mock.
when(mockfs.resolvePath(any(Path.class))).thenAnswer(new Answer(){
@Override public Path answer( InvocationOnMock args) throws Throwable {
return (Path)args.getArguments()[0];
}
}
);
// The fragment (#link) names the local symlink the cache manager creates.
final URI file=new URI("mock://test-nn1/user/me/file.txt#link");
final Path filePath=new Path(file);
File link=new File("link");
// Only file.txt has a status; any other path is reported as missing.
when(mockfs.getFileStatus(any(Path.class))).thenAnswer(new Answer(){
@Override public FileStatus answer( InvocationOnMock args) throws Throwable {
Path p=(Path)args.getArguments()[0];
if ("file.txt".equals(p.getName())) {
// size 201 and mtime 101 must match CACHE_FILES_SIZES / TIMESTAMPS below.
return new FileStatus(201,false,1,500,101,101,FsPermission.getDefault(),"me","me",filePath);
}
else {
throw new FileNotFoundException(p + " not supported by mocking");
}
}
}
);
when(mockfs.getConf()).thenReturn(conf);
// Opening file.txt yields this canned stream; anything else is missing.
final FSDataInputStream in=new FSDataInputStream(new MockInputStream("This is a test file\n".getBytes()));
when(mockfs.open(any(Path.class),anyInt())).thenAnswer(new Answer(){
@Override public FSDataInputStream answer( InvocationOnMock args) throws Throwable {
Path src=(Path)args.getArguments()[0];
if ("file.txt".equals(src.getName())) {
return in;
}
else {
throw new FileNotFoundException(src + " not supported by mocking");
}
}
}
);
// Register the SAME file twice; the metadata lists carry one entry per add.
DistributedCache.addCacheFile(file,conf);
DistributedCache.addCacheFile(file,conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS,"101,101");
conf.set(MRJobConfig.CACHE_FILES_SIZES,"201,201");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES,"false,false");
conf.set(MRConfig.LOCAL_DIR,localDir.getAbsolutePath());
LocalDistributedCacheManager manager=new LocalDistributedCacheManager();
try {
manager.setup(conf);
// Localization must have created the symlink despite the duplicate entry.
assertTrue(link.exists());
}
finally {
manager.close();
}
// close() must clean the symlink up again.
assertFalse(link.exists());
}
BooleanVerifier
/**
 * Verifies basic localization through LocalDistributedCacheManager: a single
 * cache file on a mocked FileSystem is downloaded, its "link" symlink exists
 * while the manager is set up, and the symlink is removed on close().
 */
@Test public void testDownload() throws Exception {
JobConf conf=new JobConf();
// Route the mock:// scheme to the test's MockFileSystem implementation.
conf.setClass("fs.mock.impl",MockFileSystem.class,FileSystem.class);
URI mockBase=new URI("mock://test-nn1/");
when(mockfs.getUri()).thenReturn(mockBase);
Path working=new Path("mock://test-nn1/user/me/");
when(mockfs.getWorkingDirectory()).thenReturn(working);
// resolvePath is an identity function on the mock.
when(mockfs.resolvePath(any(Path.class))).thenAnswer(new Answer(){
@Override public Path answer( InvocationOnMock args) throws Throwable {
return (Path)args.getArguments()[0];
}
}
);
// The fragment (#link) names the local symlink the cache manager creates.
final URI file=new URI("mock://test-nn1/user/me/file.txt#link");
final Path filePath=new Path(file);
File link=new File("link");
// Only file.txt has a status; any other path is reported as missing.
when(mockfs.getFileStatus(any(Path.class))).thenAnswer(new Answer(){
@Override public FileStatus answer( InvocationOnMock args) throws Throwable {
Path p=(Path)args.getArguments()[0];
if ("file.txt".equals(p.getName())) {
// size 201 and mtime 101 must match CACHE_FILES_SIZES / TIMESTAMPS below.
return new FileStatus(201,false,1,500,101,101,FsPermission.getDefault(),"me","me",filePath);
}
else {
throw new FileNotFoundException(p + " not supported by mocking");
}
}
}
);
when(mockfs.getConf()).thenReturn(conf);
// Opening file.txt yields this canned stream; anything else is missing.
final FSDataInputStream in=new FSDataInputStream(new MockInputStream("This is a test file\n".getBytes()));
when(mockfs.open(any(Path.class),anyInt())).thenAnswer(new Answer(){
@Override public FSDataInputStream answer( InvocationOnMock args) throws Throwable {
Path src=(Path)args.getArguments()[0];
if ("file.txt".equals(src.getName())) {
return in;
}
else {
throw new FileNotFoundException(src + " not supported by mocking");
}
}
}
);
DistributedCache.addCacheFile(file,conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS,"101");
conf.set(MRJobConfig.CACHE_FILES_SIZES,"201");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES,"false");
conf.set(MRConfig.LOCAL_DIR,localDir.getAbsolutePath());
LocalDistributedCacheManager manager=new LocalDistributedCacheManager();
try {
manager.setup(conf);
// Localization must have created the symlink.
assertTrue(link.exists());
}
finally {
manager.close();
}
// close() must clean the symlink up again.
assertFalse(link.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the deprecated DistributedCache accessor/mutator API end to end:
 * local archives, local files, archive/file timestamps, symlink handling,
 * file status/timestamp lookup, and cache archive/file registration. The
 * assertions are strictly order-dependent because each add/set builds on the
 * configuration state left by the previous one.
 */
@Test(timeout=1000) public void testDeprecatedFunctions() throws Exception {
// --- local archives: add appends (comma-separated), set replaces ---
DistributedCache.addLocalArchives(conf,"Test Local Archives 1");
Assert.assertEquals("Test Local Archives 1",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 1",DistributedCache.getLocalCacheArchives(conf)[0].getName());
DistributedCache.addLocalArchives(conf,"Test Local Archives 2");
Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(2,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 2",DistributedCache.getLocalCacheArchives(conf)[1].getName());
DistributedCache.setLocalArchives(conf,"Test Local Archives 3");
Assert.assertEquals("Test Local Archives 3",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 3",DistributedCache.getLocalCacheArchives(conf)[0].getName());
// --- local files: same add/append vs. set/replace contract ---
DistributedCache.addLocalFiles(conf,"Test Local Files 1");
Assert.assertEquals("Test Local Files 1",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 1",DistributedCache.getLocalCacheFiles(conf)[0].getName());
DistributedCache.addLocalFiles(conf,"Test Local Files 2");
Assert.assertEquals("Test Local Files 1,Test Local Files 2",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(2,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 2",DistributedCache.getLocalCacheFiles(conf)[1].getName());
DistributedCache.setLocalFiles(conf,"Test Local Files 3");
Assert.assertEquals("Test Local Files 3",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 3",DistributedCache.getLocalCacheFiles(conf)[0].getName());
// --- archive/file timestamps round-trip through the configuration ---
DistributedCache.setArchiveTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getArchiveTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getArchiveTimestamps(conf)[0]);
DistributedCache.setFileTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getFileTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getFileTimestamps(conf)[0]);
// --- symlinks: createAllSymlink leaves CACHE_SYMLINK unset, yet
// getSymlink(conf) still reports true ---
DistributedCache.createAllSymlink(conf,new File("Test Job Cache Dir"),new File("Test Work Dir"));
Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK));
Assert.assertTrue(DistributedCache.getSymlink(conf));
// --- file status / timestamp of a real (temporary) file ---
Assert.assertTrue(symlinkFile.createNewFile());
FileStatus fileStatus=DistributedCache.getFileStatus(conf,symlinkFile.toURI());
Assert.assertNotNull(fileStatus);
Assert.assertEquals(fileStatus.getModificationTime(),DistributedCache.getTimestamp(conf,symlinkFile.toURI()));
Assert.assertTrue(symlinkFile.delete());
// --- cache archive/file registration round-trips the URI ---
DistributedCache.addCacheArchive(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_ARCHIVES));
Assert.assertEquals(1,DistributedCache.getCacheArchives(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheArchives(conf)[0]);
DistributedCache.addCacheFile(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_FILES));
Assert.assertEquals(1,DistributedCache.getCacheFiles(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheFiles(conf)[0]);
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a local MapReduce job whose keys are all NullWritable: ten distinct
 * Text values are written to a SequenceFile under the null key, pushed
 * through NullMapper + IdentityReducer, and the output is read back to check
 * that every value survived exactly once.
 */
@Test public void testNullKeys() throws Exception {
JobConf conf=new JobConf(TestMapRed.class);
FileSystem fs=FileSystem.getLocal(conf);
HashSet values=new HashSet();
// Generate ten 14-char strings: AAAA..., BBBB... is NOT produced — each step
// replaces only one letter ('A'->'B', then 'B'->'C', ...), giving a chain
// of distinct values.
String m="AAAAAAAAAAAAAA";
for (int i=1; i < 11; ++i) {
values.add(m);
m=m.replace((char)('A' + i - 1),(char)('A' + i));
}
Path testdir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(fs);
fs.delete(testdir,true);
Path inFile=new Path(testdir,"nullin/blah");
// Input is an uncompressed SequenceFile of (NullWritable, Text) records.
SequenceFile.Writer w=SequenceFile.createWriter(fs,conf,inFile,NullWritable.class,Text.class,SequenceFile.CompressionType.NONE);
Text t=new Text();
for ( String s : values) {
t.set(s);
w.append(NullWritable.get(),t);
}
w.close();
FileInputFormat.setInputPaths(conf,inFile);
FileOutputFormat.setOutputPath(conf,new Path(testdir,"nullout"));
conf.setMapperClass(NullMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setOutputKeyClass(NullWritable.class);
conf.setOutputValueClass(Text.class);
conf.setInputFormat(SequenceFileInputFormat.class);
conf.setOutputFormat(SequenceFileOutputFormat.class);
// A single reducer yields exactly one output file: part-00000.
conf.setNumReduceTasks(1);
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.LOCAL_FRAMEWORK_NAME);
JobClient.runJob(conf);
SequenceFile.Reader r=new SequenceFile.Reader(fs,new Path(testdir,"nullout/part-00000"),conf);
// Re-walk the generator sequence while draining the output; each output
// value must be one of the originals (removed from the set as seen).
m="AAAAAAAAAAAAAA";
for (int i=1; r.next(NullWritable.get(),t); ++i) {
assertTrue("Unexpected value: " + t,values.remove(t.toString()));
// m is regenerated in step with the loop but not asserted against here.
m=m.replace((char)('A' + i - 1),(char)('A' + i));
}
assertTrue("Missing values: " + values.toString(),values.isEmpty());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * To test OS dependent setting of default execution path for a MapRed task.
 * Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set -
 * for WINDOWS: %HADOOP_COMMON_HOME%\bin is expected to be included in PATH - for
 * Linux: $HADOOP_COMMON_HOME/lib/native is expected to be included in
 * LD_LIBRARY_PATH
 *
 * The second part launches a real job and checks (via
 * ExecutionEnvCheckMapClass) that the env setting propagates to child tasks.
 */
@Test public void testMapRedExecutionEnv(){
// Part 1: expand the default admin user env against a fake HADOOP_COMMON_HOME
// and check the OS-appropriate path variable contains the expected entry.
try {
Map environment=new HashMap();
String setupHadoopHomeCommand=Shell.WINDOWS ? "HADOOP_COMMON_HOME=C:\\fake\\PATH\\to\\hadoop\\common\\home" : "HADOOP_COMMON_HOME=/fake/path/to/hadoop/common/home";
MRApps.setEnvFromInputString(environment,setupHadoopHomeCommand,conf);
MRApps.setEnvFromInputString(environment,conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV,MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV),conf);
String executionPaths=environment.get(Shell.WINDOWS ? "PATH" : "LD_LIBRARY_PATH");
String toFind=Shell.WINDOWS ? "C:\\fake\\PATH\\to\\hadoop\\common\\home\\bin" : "/fake/path/to/hadoop/common/home/lib/native";
assertTrue("execution path does not include the hadoop lib location " + toFind,executionPaths.contains(toFind));
}
catch ( Exception e) {
e.printStackTrace();
// Clean up BEFORE failing: fail() throws AssertionError, so any statement
// placed after it would be unreachable (the original code had tearDown()
// after fail(), which could never run).
tearDown();
fail("Exception in testing execution environment for MapReduce task");
}
// Part 2: run a small job whose mapper verifies the environment it sees.
try {
JobConf conf=new JobConf(mr.getConfig());
Path inDir=new Path("input");
Path outDir=new Path("output");
String input="The input";
configure(conf,inDir,outDir,input,ExecutionEnvCheckMapClass.class,IdentityReducer.class);
launchTest(conf,inDir,outDir,input);
}
catch ( Exception e) {
e.printStackTrace();
// Same ordering fix as above: clean up, then fail.
tearDown();
fail("Exception in testing propagation of env setting to child task");
}
}
BooleanVerifier
/**
 * Runs the shared test job end-to-end against the standard input directory
 * and validates its counters.
 */
@Test public void testJob() throws Exception {
final Job job=createJob();
// Each test writes to its own subdirectory of the shared output dir.
final Path jobOutput=new Path(outDir,"testJob");
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,inDir);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,jobOutput);
// Block until completion; true => report progress while waiting.
assertTrue(job.waitForCompletion(true));
validateCounters(job.getCounters(),5,25,5,5);
}
BooleanVerifier
/**
 * test run from command line JobQueueClient
 *
 * Submits a trivial map-only job to a mini cluster, then drives the
 * JobQueueClient CLI (-list, -showacls, -info, -info -showJobs, and no args)
 * while capturing System.out, asserting on the printed queue information.
 * @throws Exception
 */
@Test(timeout=500000) public void testJobQueueClient() throws Exception {
MiniMRClientCluster mr=null;
FileSystem fileSys=null;
// Remember the real stdout so it can be restored in the finally block.
PrintStream oldOut=System.out;
try {
mr=createMiniClusterWithCapacityScheduler();
JobConf job=new JobConf(mr.getConfig());
fileSys=FileSystem.get(job);
fileSys.delete(testDir,true);
FSDataOutputStream out=fileSys.create(inFile,true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job,inFile);
FileOutputFormat.setOutputPath(job,outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
// Map-only job: no reducers needed for the queue listing to show it.
job.setNumReduceTasks(0);
JobClient client=new JobClient(mr.getConfig());
client.submitJob(job);
JobQueueClient jobClient=new JobQueueClient(job);
// -list: print all queues.
ByteArrayOutputStream bytes=new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg={"-list"};
jobClient.run(arg);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
// -showacls: print the current user's queue ACLs.
bytes=new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg1={"-showacls"};
jobClient.run(arg1);
assertTrue(bytes.toString().contains("Queue acls for user :"));
assertTrue(bytes.toString().contains("root ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
assertTrue(bytes.toString().contains("default ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
// -info <queue>: print details of a single queue.
bytes=new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg2={"-info","default"};
jobClient.run(arg2);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
// -info <queue> -showJobs: details plus the jobs in the queue.
bytes=new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg3={"-info","default","-showJobs"};
jobClient.run(arg3);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
assertTrue(bytes.toString().contains("job_1"));
// No arguments: just verify the CLI handles it without throwing.
String[] arg4={};
jobClient.run(arg4);
}
finally {
// Always restore stdout and tear down cluster/files.
System.setOut(oldOut);
if (fileSys != null) {
fileSys.delete(testDir,true);
}
if (mr != null) {
mr.stop();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test JobConf
 *
 * Submits a trivial map-only job to a mini cluster and then exercises the
 * deprecated JobClient / NetworkedJob API surface: job status accessors,
 * task reports, cluster status (including its Writable round-trip), task
 * output filters, queue introspection and delegation tokens.
 * @throws Exception
 */
@SuppressWarnings("deprecation") @Test(timeout=500000) public void testNetworkedJob() throws Exception {
MiniMRClientCluster mr=null;
FileSystem fileSys=null;
try {
mr=createMiniClusterWithCapacityScheduler();
JobConf job=new JobConf(mr.getConfig());
fileSys=FileSystem.get(job);
fileSys.delete(testDir,true);
FSDataOutputStream out=fileSys.create(inFile,true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job,inFile);
FileOutputFormat.setOutputPath(job,outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
// Map-only job keeps the run fast; progress values below stay at 0.
job.setNumReduceTasks(0);
JobClient client=new JobClient(mr.getConfig());
RunningJob rj=client.submitJob(job);
JobID jobId=rj.getID();
NetworkedJob runningJob=(NetworkedJob)client.getJob(jobId);
runningJob.setJobPriority(JobPriority.HIGH.name());
// Freshly-submitted job: identifiers resolve, but no name, no progress,
// no completion events and no history yet.
assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
assertEquals(runningJob.getID(),jobId);
assertEquals(runningJob.getJobID(),jobId.toString());
assertEquals(runningJob.getJobName(),"N/A");
assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
assertTrue(runningJob.getTrackingURL().length() > 0);
assertTrue(runningJob.mapProgress() == 0.0f);
assertTrue(runningJob.reduceProgress() == 0.0f);
assertTrue(runningJob.cleanupProgress() == 0.0f);
assertTrue(runningJob.setupProgress() == 0.0f);
TaskCompletionEvent[] tce=runningJob.getTaskCompletionEvents(0);
assertEquals(tce.length,0);
assertEquals(runningJob.getHistoryUrl(),"");
assertFalse(runningJob.isRetired());
assertEquals(runningJob.getFailureInfo(),"");
assertEquals(runningJob.getJobStatus().getJobName(),"N/A");
assertEquals(client.getMapTaskReports(jobId).length,0);
// JOB_SETUP / JOB_CLEANUP are not recognized task types on YARN; the
// calls must surface a YarnRuntimeException with a specific message.
try {
client.getSetupTaskReports(jobId);
}
catch ( YarnRuntimeException e) {
assertEquals(e.getMessage(),"Unrecognized task type: JOB_SETUP");
}
try {
client.getCleanupTaskReports(jobId);
}
catch ( YarnRuntimeException e) {
assertEquals(e.getMessage(),"Unrecognized task type: JOB_CLEANUP");
}
assertEquals(client.getReduceTaskReports(jobId).length,0);
// Cluster-wide status snapshot for the two-node mini cluster.
ClusterStatus status=client.getClusterStatus(true);
assertEquals(status.getActiveTrackerNames().size(),2);
assertEquals(status.getBlacklistedTrackers(),0);
assertEquals(status.getBlacklistedTrackerNames().size(),0);
assertEquals(status.getBlackListedTrackersInfo().size(),0);
assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING);
assertEquals(status.getMapTasks(),1);
assertEquals(status.getMaxMapTasks(),20);
assertEquals(status.getMaxReduceTasks(),4);
assertEquals(status.getNumExcludedNodes(),0);
assertEquals(status.getReduceTasks(),1);
assertEquals(status.getTaskTrackers(),2);
assertEquals(status.getTTExpiryInterval(),0);
assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING);
assertEquals(status.getGraylistedTrackers(),0);
// Round-trip ClusterStatus through its Writable serialization and check
// the deserialized copy agrees with the original.
ByteArrayOutputStream dataOut=new ByteArrayOutputStream();
status.write(new DataOutputStream(dataOut));
ClusterStatus status2=new ClusterStatus();
status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
assertEquals(status.getActiveTrackerNames(),status2.getActiveTrackerNames());
assertEquals(status.getBlackListedTrackersInfo(),status2.getBlackListedTrackersInfo());
assertEquals(status.getMapTasks(),status2.getMapTasks());
// NOTE: an empty try { } catch (RuntimeException) block that could never
// execute its catch (an empty try cannot throw) was removed here.
JobClient.setTaskOutputFilter(job,TaskStatusFilter.ALL);
assertEquals(JobClient.getTaskOutputFilter(job),TaskStatusFilter.ALL);
assertEquals(client.getDefaultMaps(),20);
assertEquals(client.getDefaultReduces(),4);
assertEquals(client.getSystemDir().getName(),"jobSubmitDir");
// Queue introspection: a single "default" queue under the root.
JobQueueInfo[] rootQueueInfo=client.getRootQueues();
assertEquals(rootQueueInfo.length,1);
assertEquals(rootQueueInfo[0].getQueueName(),"default");
JobQueueInfo[] qinfo=client.getQueues();
assertEquals(qinfo.length,1);
assertEquals(qinfo[0].getQueueName(),"default");
assertEquals(client.getChildQueues("default").length,0);
assertEquals(client.getJobsFromQueue("default").length,1);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
JobQueueInfo qi=client.getQueueInfo("default");
assertEquals(qi.getQueueName(),"default");
assertEquals(qi.getQueueState(),"running");
QueueAclsInfo[] aai=client.getQueueAclsForCurrentUser();
assertEquals(aai.length,2);
assertEquals(aai[0].getQueueName(),"root");
assertEquals(aai[1].getQueueName(),"default");
// Delegation token from the RM for the current user.
Token token=client.getDelegationToken(new Text(UserGroupInformation.getCurrentUser().getShortUserName()));
assertEquals(token.getKind().toString(),"RM_DELEGATION_TOKEN");
assertEquals("Expected matching JobIDs",jobId,client.getJob(jobId).getJobStatus().getJobID());
assertEquals("Expected matching startTimes",rj.getJobStatus().getStartTime(),client.getJob(jobId).getJobStatus().getStartTime());
}
finally {
if (fileSys != null) {
fileSys.delete(testDir,true);
}
if (mr != null) {
mr.stop();
}
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Runs a local job with a combiner and a custom combiner-key grouping
 * comparator: keys like "A|a" and "A|b" are grouped by their prefix during
 * combining, so the combiner must fire (COMBINE_INPUT_RECORDS > 0) and must
 * shrink the record stream; the reduce output is then checked to contain
 * exactly one aggregated line per prefix group.
 */
@Test public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in=new File(TEST_ROOT_DIR,"input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out=new File(TEST_ROOT_DIR,"output");
// Five records in two prefix groups: A|{a,b} and B|{a,b,c}.
PrintWriter pw=new PrintWriter(new FileWriter(new File(in,"data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf job=new JobConf();
job.set("mapreduce.framework.name","local");
TextInputFormat.setInputPaths(job,new Path(in.getPath()));
TextOutputFormat.setOutputPath(job,new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormat(TextOutputFormat.class);
job.setOutputValueGroupingComparator(GroupComparator.class);
job.setCombinerClass(Combiner.class);
// Group combiner input by key prefix rather than the full key.
job.setCombinerKeyGroupingComparator(GroupComparator.class);
// Threshold 0 forces the combiner to run even with few spills.
job.setInt("min.num.spills.for.combine",0);
JobClient client=new JobClient(job);
RunningJob runningJob=client.submitJob(job);
runningJob.waitForCompletion();
if (runningJob.isSuccessful()) {
Counters counters=runningJob.getCounters();
long combinerInputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_INPUT_RECORDS");
long combinerOutputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_OUTPUT_RECORDS");
// The combiner must have run and must have reduced the record count.
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
// Exactly two output lines are expected (one per prefix group); each
// line contributes "<prefix><char-at-4>" to the comparison set.
BufferedReader br=new BufferedReader(new FileReader(new File(out,"part-00000")));
Set output=new HashSet();
String line=br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
Assert.assertNull(line);
br.close();
Set expected=new HashSet();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected,output);
}
else {
Assert.fail("Job failed");
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test QueueManager
 * configuration from file
 *
 * Builds a QueueManager from a written config file containing two leaf
 * queues ("first" running, "second" stopped), then checks the queue tree,
 * ACL evaluation for mocked users, refreshQueues behavior, JSON dumping of
 * the configuration, and JobQueueInfo views.
 * @throws IOException
 */
@Test(timeout=5000) public void testQueue() throws IOException {
File f=null;
try {
f=writeFile();
QueueManager manager=new QueueManager(f.getCanonicalPath(),true);
manager.setSchedulerInfo("first","queueInfo");
manager.setSchedulerInfo("second","queueInfoqueueInfo");
Queue root=manager.getRoot();
assertTrue(root.getChildren().size() == 2);
// The config file defines "first" before "second"; iteration follows that order.
Iterator iterator=root.getChildren().iterator();
Queue firstSubQueue=iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue=iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(secondSubQueue.getProperties().getProperty("key"),"value");
assertEquals(secondSubQueue.getProperties().getProperty("key1"),"value1");
assertEquals(firstSubQueue.getState().getStateName(),"running");
assertEquals(secondSubQueue.getState().getStateName(),"stopped");
Set template=new HashSet();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(),template);
// ACL checks: user1/group1 may submit to "first" only and cannot administer.
UserGroupInformation mockUGI=mock(UserGroupInformation.class);
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups={"group1"};
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("second",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
// user3 is an administrator of "first" per the written config.
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
QueueAclsInfo[] qai=manager.getQueueAcls(mockUGI);
assertEquals(qai.length,1);
// Refresh must keep queue identity, states and scheduler info intact.
manager.refreshQueues(getConfiguration(),null);
iterator=root.getChildren().iterator();
Queue firstSubQueue1=iterator.next();
Queue secondSubQueue1=iterator.next();
assertTrue(firstSubQueue.equals(firstSubQueue1));
assertEquals(firstSubQueue1.getState().getStateName(),"running");
assertEquals(secondSubQueue1.getState().getStateName(),"stopped");
assertEquals(firstSubQueue1.getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue1.getSchedulingInfo(),"queueInfoqueueInfo");
// JobQueueInfo views mirror the queue tree.
assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(),"first");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(),"running");
assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(),0);
assertEquals(manager.getSchedulerInfo("first"),"queueInfo");
Set queueJobQueueInfos=new HashSet();
for ( JobQueueInfo jobInfo : manager.getJobQueueInfos()) {
queueJobQueueInfos.add(jobInfo.getQueueName());
}
Set rootJobQueueInfos=new HashSet();
for ( Queue queue : root.getChildren()) {
rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
}
assertEquals(queueJobQueueInfos,rootJobQueueInfos);
assertEquals(manager.getJobQueueInfoMapping().get("first").getQueueName(),"first");
// Dump the file-based configuration as JSON and check the "first" queue entry.
Writer writer=new StringWriter();
Configuration conf=getConfiguration();
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer,f.getAbsolutePath(),conf);
String result=writer.toString();
assertTrue(result.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
// Dump the Configuration-based queues and compare the full JSON document.
writer=new StringWriter();
QueueManager.dumpConfiguration(writer,conf);
result=writer.toString();
assertEquals("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",result);
// A default-constructed QueueAclsInfo has no queue name.
QueueAclsInfo qi=new QueueAclsInfo();
assertNull(qi.getQueueName());
}
finally {
if (f != null) {
f.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test for Qmanager with empty configuration
 *
 * Builds a QueueManager directly from a Configuration (no queue file) and
 * verifies the resulting tree: two leaf queues, "first" running and
 * "second" stopped, with the expected ACL string and scheduler info.
 * @throws IOException
 */
@Test(timeout=5000) public void test2Queue() throws IOException {
Configuration configuration=getConfiguration();
QueueManager queueManager=new QueueManager(configuration);
// Attach scheduler info to both leaf queues before inspecting the tree.
queueManager.setSchedulerInfo("second","queueInfoqueueInfo");
queueManager.setSchedulerInfo("first","queueInfo");
Queue rootQueue=queueManager.getRoot();
assertTrue(rootQueue.getChildren().size() == 2);
// Children iterate in configuration order: "first" then "second".
Iterator childIterator=rootQueue.getChildren().iterator();
Queue firstQueue=childIterator.next();
Queue secondQueue=childIterator.next();
assertTrue(firstQueue.getName().equals("first"));
assertEquals(firstQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed");
assertTrue(secondQueue.getName().equals("second"));
// State is exposed both via the Queue objects and via isRunning().
assertEquals(firstQueue.getState().getStateName(),"running");
assertEquals(secondQueue.getState().getStateName(),"stopped");
assertTrue(queueManager.isRunning("first"));
assertFalse(queueManager.isRunning("second"));
assertEquals(firstQueue.getSchedulingInfo(),"queueInfo");
assertEquals(secondQueue.getSchedulingInfo(),"queueInfoqueueInfo");
// The set of leaf queue names matches exactly the two configured queues.
Set expectedLeaves=new HashSet();
expectedLeaves.add("first");
expectedLeaves.add("second");
assertEquals(queueManager.getLeafQueueNames(),expectedLeaves);
}
APIUtilityVerifier BooleanVerifier
/**
 * Tests XML generation for a queue hierarchy: builds a JobQueueInfo tree
 * (root with two children), serializes it via
 * QueueConfigurationParser.getQueueElement plus an identity Transformer,
 * and checks the rendered text of the resulting element.
 * Fix: the throws clause listed ParserConfigurationException alongside
 * Exception, which already subsumes it; the redundant type is dropped.
 * @throws Exception on any parser or transformer failure
 */
@Test(timeout=5000) public void testQueueConfigurationParser() throws Exception {
  JobQueueInfo info = new JobQueueInfo("root", "rootInfo");
  JobQueueInfo infoChild1 = new JobQueueInfo("child1", "child1Info");
  JobQueueInfo infoChild2 = new JobQueueInfo("child2", "child1Info");
  info.addChild(infoChild1);
  info.addChild(infoChild2);
  DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
  DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
  Document document = builder.newDocument();
  Element e = QueueConfigurationParser.getQueueElement(document, info);
  // Serialize the DOM element to a string using an identity transform.
  DOMSource domSource = new DOMSource(e);
  StringWriter writer = new StringWriter();
  StreamResult result = new StreamResult(writer);
  TransformerFactory tf = TransformerFactory.newInstance();
  Transformer transformer = tf.newTransformer();
  transformer.transform(domSource, result);
  String str = writer.toString();
  // Queue names and their "running" state appear in document order.
  assertTrue(str.endsWith("root running child1 running child2 running "));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a tiny map-only job named "testStatusLimit" over a single one-line
 * input file and verifies that it completes successfully.
 */
@Test public void testStatusLimit() throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration();
  Path testDir = new Path(testRootTempDir, "testStatusLimit");
  Path inputDir = new Path(testDir, "in");
  Path outputDir = new Path(testDir, "out");
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean input directory containing exactly one small file.
  if (fs.exists(inputDir)) {
    fs.delete(inputDir, true);
  }
  fs.mkdirs(inputDir);
  DataOutputStream out = fs.create(new Path(inputDir, "part-" + 0));
  out.writeBytes("testStatusLimit");
  out.close();
  // The output directory must not exist before the job runs.
  if (fs.exists(outputDir)) {
    fs.delete(outputDir, true);
  }
  Job job = Job.getInstance(conf, "testStatusLimit");
  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);
  FileInputFormat.addInputPath(job, inputDir);
  FileOutputFormat.setOutputPath(job, outputDir);
  job.waitForCompletion(true);
  assertTrue("Job failed", job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link Reporter}'s progress for a map-reduce job: runs a one-map,
 * one-reduce job with the progress-testing mapper/reducer and asserts it
 * finishes successfully.
 */
@Test public void testReporterProgressForMRJob() throws IOException {
  JobConf jobConf = new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setReducerClass(ProgressTestingReducer.class);
  jobConf.setMapOutputKeyClass(Text.class);
  // A single attempt per task so failures are not masked by retries.
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(1);
  Path testDir = new Path(testRootTempDir, "testReporterProgressForMRJob");
  RunningJob runningJob = UtilsForTests.runJob(jobConf, new Path(testDir, "in"), new Path(testDir, "out"), 1, 1, INPUT);
  runningJob.waitForCompletion();
  assertTrue("Job failed", runningJob.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link Reporter}'s progress for a map-only job.
 * This will make sure that only the map phase decides the attempt's progress.
 */
@SuppressWarnings("deprecation") @Test public void testReporterProgressForMapOnlyJob() throws IOException {
  JobConf jobConf = new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setMapOutputKeyClass(Text.class);
  // Single map attempt and zero reduce attempts: strictly map-only.
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(0);
  Path testDir = new Path(testRootTempDir, "testReporterProgressForMapOnlyJob");
  RunningJob runningJob = UtilsForTests.runJob(jobConf, new Path(testDir, "in"), new Path(testDir, "out"), 1, 0, INPUT);
  runningJob.waitForCompletion();
  assertTrue("Job failed", runningJob.isSuccessful());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that ResourceMgrDelegate.getAllJobs() converts YARN application
 * reports into MapReduce JobStatus states: FINISHED applications map by
 * final status (FAILED/SUCCEEDED/KILLED) and a FAILED application maps to
 * State.FAILED, in the order the reports were added.
 * NOTE(review): method name has a typo ("tesAllJobs"); left unchanged here
 * to keep the test's public identity stable.
 */
@Test public void tesAllJobs() throws Exception {
// Mock RM protocol that answers getApplications with four canned reports.
final ApplicationClientProtocol applicationsManager=Mockito.mock(ApplicationClientProtocol.class);
GetApplicationsResponse allApplicationsResponse=Records.newRecord(GetApplicationsResponse.class);
List applications=new ArrayList();
applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.FAILED));
applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.SUCCEEDED));
applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.KILLED));
applications.add(getApplicationReport(YarnApplicationState.FAILED,FinalApplicationStatus.FAILED));
allApplicationsResponse.setApplicationList(applications);
Mockito.when(applicationsManager.getApplications(Mockito.any(GetApplicationsRequest.class))).thenReturn(allApplicationsResponse);
// Inject the mock RM client into the delegate when its service starts.
ResourceMgrDelegate resourceMgrDelegate=new ResourceMgrDelegate(new YarnConfiguration()){
@Override protected void serviceStart() throws Exception {
Assert.assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl)this.client).setRMClient(applicationsManager);
}
}
;
JobStatus[] allJobs=resourceMgrDelegate.getAllJobs();
// Expected states follow the insertion order of the reports above.
Assert.assertEquals(State.FAILED,allJobs[0].getState());
Assert.assertEquals(State.SUCCEEDED,allJobs[1].getState());
Assert.assertEquals(State.KILLED,allJobs[2].getState());
Assert.assertEquals(State.FAILED,allJobs[3].getState());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that getRootQueues makes a request for the (recursive) child queues:
 * the GetQueueInfoRequest sent to the RM must have both includeChildQueues
 * and recursive set to true.
 * @throws IOException
 */
@Test public void testGetRootQueues() throws IOException, InterruptedException {
// Mock RM protocol whose getQueueInfo always returns a mocked response.
final ApplicationClientProtocol applicationsManager=Mockito.mock(ApplicationClientProtocol.class);
GetQueueInfoResponse response=Mockito.mock(GetQueueInfoResponse.class);
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo=Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
Mockito.when(response.getQueueInfo()).thenReturn(queueInfo);
try {
Mockito.when(applicationsManager.getQueueInfo(Mockito.any(GetQueueInfoRequest.class))).thenReturn(response);
}
catch ( YarnException e) {
throw new IOException(e);
}
// Inject the mock RM client into the delegate when its service starts.
ResourceMgrDelegate delegate=new ResourceMgrDelegate(new YarnConfiguration()){
@Override protected void serviceStart() throws Exception {
Assert.assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl)this.client).setRMClient(applicationsManager);
}
}
;
delegate.getRootQueues();
// Capture the request actually sent to the RM and inspect its flags.
ArgumentCaptor argument=ArgumentCaptor.forClass(GetQueueInfoRequest.class);
try {
Mockito.verify(applicationsManager).getQueueInfo(argument.capture());
}
catch ( YarnException e) {
throw new IOException(e);
}
Assert.assertTrue("Children of root queue not requested",argument.getValue().getIncludeChildQueues());
Assert.assertTrue("Request wasn't to recurse through children",argument.getValue().getRecursive());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests ShuffleHandler state-store version checking across restarts: a
 * restart with a compatible (same-major) stored version starts and still
 * serves the registered shuffle token, while an incompatible
 * (different-major) stored version makes service start fail with a
 * ServiceStateException.
 * @throws IOException
 */
@Test public void testRecoveryFromOtherVersions() throws IOException {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(12345,1);
final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
ShuffleHandler shuffle=new ShuffleHandler();
// Recovery state lives in tmpDir so it survives the restarts below.
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// Register an application with a shuffle token, then fetch once.
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
int rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
shuffle.close();
// Restart: recovered state (same version) must still serve the token.
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
Version version=Version.newInstance(1,0);
Assert.assertEquals(version,shuffle.getCurrentVersion());
// Store a compatible (same major) version 1.1 and restart again.
Version version11=Version.newInstance(1,1);
shuffle.storeVersion(version11);
Assert.assertEquals(version11,shuffle.loadVersion());
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
// After a compatible restart the stored version matches the current one.
Assert.assertEquals(version,shuffle.loadVersion());
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Store an incompatible (different major) version 2.1: start must fail.
Version version21=Version.newInstance(2,1);
shuffle.storeVersion(version21);
Assert.assertEquals(version21,shuffle.loadVersion());
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
try {
shuffle.start();
Assert.fail("Incompatible version, should expect fail here.");
}
catch ( ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for state DB schema:"));
}
}
finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify client prematurely closing a connection: the server should not
 * report an error. The overridden sendError implementations only record a
 * failure, and the test asserts none were recorded.
 * @throws Exception exception.
 */
@Test(timeout=10000) public void testClientClosesConnection() throws Exception {
final ArrayList failures=new ArrayList(1);
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
// Shuffle stub: skips request verification, sends a very large map output
// so the client can disconnect mid-transfer, and records (instead of
// reporting) any sendError call.
ShuffleHandler shuffleHandler=new ShuffleHandler(){
@Override protected Shuffle getShuffle( Configuration conf){
return new Shuffle(conf){
@Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
return null;
}
@Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
super.setResponseHeaders(response,keepAliveParam,100);
}
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
@Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
// One valid shuffle header first, then a huge payload (100000
// headers) that the client will not fully consume.
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
}
@Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
// Record only the first failure instead of failing in-handler.
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
}
;
}
}
;
shuffleHandler.init(conf);
shuffleHandler.start();
// Fetch a map output, read only the first header, then close early.
URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
Assert.assertEquals("close",conn.getHeaderField(HttpHeaders.CONNECTION));
ShuffleHeader header=new ShuffleHeader();
header.readFields(input);
input.close();
shuffleHandler.stop();
Assert.assertTrue("sendError called when client closed connection",failures.size() == 0);
}
APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Validate the ownership of the map-output files being pulled in. The
 * local-file-system owner of the file should match the user component in the
 * request; the test asserts that the shuffle response contains the
 * ownership-mismatch error message for a non-matching user.
 * @throws Exception exception
 */
@Test(timeout=100000) public void testMapFileAccess() throws IOException {
// NativeIO is required for the getFstat-based ownership check below.
assumeTrue(NativeIO.isAvailable());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
File absLogDir=new File("target",TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
conf.set(YarnConfiguration.NM_LOCAL_DIRS,absLogDir.getAbsolutePath());
ApplicationId appId=ApplicationId.newInstance(12345,1);
LOG.info(appId.toString());
String appAttemptId="attempt_12345_1_m_1_0";
// "randomUser" is not the local owner of the files created below.
String user="randomUser";
String reducerId="0";
List fileMap=new ArrayList();
createShuffleHandlerFiles(absLogDir,user,appId.toString(),appAttemptId,conf,fileMap);
// Shuffle stub that skips request verification so the ownership check is
// the only gate being exercised.
ShuffleHandler shuffleHandler=new ShuffleHandler(){
@Override protected Shuffle getShuffle( Configuration conf){
return new Shuffle(conf){
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
}
;
}
}
;
shuffleHandler.init(conf);
try {
shuffleHandler.start();
// Register the application with a shuffle token for "randomUser".
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffleHandler.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_0001&reduce="+ reducerId+ "&map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
byte[] byteArr=new byte[10000];
try {
DataInputStream is=new DataInputStream(conn.getInputStream());
is.readFully(byteArr);
}
catch ( EOFException e) {
// Expected: the error response is shorter than the 10000-byte buffer.
}
// Determine the real owner of the map-output file via NativeIO fstat.
FileInputStream is=new FileInputStream(fileMap.get(0));
String owner=NativeIO.POSIX.getFstat(is.getFD()).getOwner();
is.close();
// The shuffle response body must carry the ownership-mismatch message.
String message="Owner '" + owner + "' for path "+ fileMap.get(0).getAbsolutePath()+ " did not match expected owner '"+ user+ "'";
Assert.assertTrue((new String(byteArr)).contains(message));
}
finally {
shuffleHandler.stop();
}
}
BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests the SkipBadRecords getters/setters: verifies defaults on a fresh
 * Configuration, then round-trips each setter. The skip-output path is set
 * on a separate JobConf because setSkipOutputPath requires a JobConf.
 * Fix: the long-valued getters were checked with the deprecated 3-arg
 * assertEquals(expected, actual, delta) form (treating longs as doubles),
 * inconsistent with the sibling 2-arg long assertions; both now use the
 * plain 2-arg form.
 */
@Test(timeout=5000) public void testSkipBadRecords(){
  Configuration conf = new Configuration();
  // Defaults on an untouched Configuration.
  assertEquals(2, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertTrue(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertTrue(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(0, SkipBadRecords.getMapperMaxSkipRecords(conf));
  assertEquals(0, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertNull(SkipBadRecords.getSkipOutputPath(conf));
  // Round-trip each setter and re-read.
  SkipBadRecords.setAttemptsToStartSkipping(conf, 5);
  SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
  SkipBadRecords.setAutoIncrReducerProcCount(conf, false);
  SkipBadRecords.setMapperMaxSkipRecords(conf, 6L);
  SkipBadRecords.setReducerMaxSkipGroups(conf, 7L);
  JobConf jc = new JobConf();
  SkipBadRecords.setSkipOutputPath(jc, new Path("test"));
  assertEquals(5, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertFalse(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertFalse(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(6L, SkipBadRecords.getMapperMaxSkipRecords(conf));
  assertEquals(7L, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertEquals("test", SkipBadRecords.getSkipOutputPath(jc).toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests checkpoint ID tracking in TaskAttemptListenerImpl: a
 * TaskCheckpointID stored via setCheckpointID must come back verbatim from
 * getCheckpointID, with its byte/time counters and the partial committed
 * output list intact.
 * Fix: the identity check used a bare Java `assert`, which is a no-op
 * unless the JVM runs with -ea; it is now a JUnit assertion that always
 * executes.
 */
@Test public void testCheckpointIDTracking() throws IOException, InterruptedException {
SystemClock clock=new SystemClock();
org.apache.hadoop.mapreduce.v2.app.job.Task mockTask=mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
Job mockJob=mock(Job.class);
when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
Dispatcher dispatcher=mock(Dispatcher.class);
EventHandler ea=mock(EventHandler.class);
when(dispatcher.getEventHandler()).thenReturn(ea);
RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
AppContext appCtx=mock(AppContext.class);
when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
when(appCtx.getClock()).thenReturn(clock);
when(appCtx.getEventHandler()).thenReturn(ea);
JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
final TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
when(appCtx.getEventHandler()).thenReturn(ea);
CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
policy.init(appCtx);
// Listener whose heartbeat handler is replaced with the mock above.
TaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,policy){
@Override protected void registerHeartbeatHandler( Configuration conf){
taskHeartbeatHandler=hbHandler;
}
}
;
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.TASK_PREEMPTION,true);
listener.init(conf);
listener.start();
TaskAttemptID tid=new TaskAttemptID("12345",1,TaskType.REDUCE,1,0);
List partialOut=new ArrayList();
partialOut.add(new Path("/prev1"));
partialOut.add(new Path("/prev2"));
// Mocked counters backing the checkpoint's byte and time totals.
Counters counters=mock(Counters.class);
final long CBYTES=64L * 1024 * 1024;
final long CTIME=4344L;
final Path CLOC=new Path("/test/1");
Counter cbytes=mock(Counter.class);
when(cbytes.getValue()).thenReturn(CBYTES);
Counter ctime=mock(Counter.class);
when(ctime.getValue()).thenReturn(CTIME);
when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);
TaskCheckpointID incid=new TaskCheckpointID(new FSCheckpointID(CLOC),partialOut,counters);
listener.setCheckpointID(org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()),incid);
CheckpointID outcid=listener.getCheckpointID(tid.getTaskID());
TaskCheckpointID tcid=(TaskCheckpointID)outcid;
assertEquals(CBYTES,tcid.getCheckpointBytes());
assertEquals(CTIME,tcid.getCheckpointTime());
// Set equality in both directions: same partial-output contents.
assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
// Was `assert outcid == incid;` — silently skipped without -ea.
assertTrue("retrieved checkpoint ID should be the stored instance",outcid == incid);
listener.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests TaskAttemptListenerImpl.getTask() through the JVM registration
 * lifecycle: an unknown JVM is told to die, a pending-but-unlaunched task
 * yields null, a launched task is handed out exactly once (subsequent or
 * unregistered requests are told to die), and JVMId.forName rejects a
 * malformed id string.
 * Fix: the exception-message check passed (actual, expected) to
 * assertEquals; the arguments are now in the documented (expected, actual)
 * order so failure messages read correctly.
 */
@Test(timeout=5000) public void testGetTask() throws IOException {
AppContext appCtx=mock(AppContext.class);
JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
Dispatcher dispatcher=mock(Dispatcher.class);
EventHandler ea=mock(EventHandler.class);
when(dispatcher.getEventHandler()).thenReturn(ea);
when(appCtx.getEventHandler()).thenReturn(ea);
CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
policy.init(appCtx);
MockTaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,hbHandler,policy);
Configuration conf=new Configuration();
listener.init(conf);
listener.start();
JVMId id=new JVMId("foo",1,true,1);
WrappedJvmID wid=new WrappedJvmID(id.getJobId(),id.isMap,id.getId());
JvmContext context=new JvmContext();
context.jvmId=id;
// Unknown JVM: told to die.
JvmTask result=listener.getTask(context);
assertNotNull(result);
assertTrue(result.shouldDie);
TaskAttemptId attemptID=mock(TaskAttemptId.class);
Task task=mock(Task.class);
// Pending but not launched: no task handed out yet.
listener.registerPendingTask(task,wid);
result=listener.getTask(context);
assertNull(result);
listener.unregister(attemptID,wid);
// Pending and launched: the task is handed out and the heartbeat
// handler registers the attempt.
listener.registerPendingTask(task,wid);
listener.registerLaunchedTask(attemptID,wid);
verify(hbHandler).register(attemptID);
result=listener.getTask(context);
assertNotNull(result);
assertFalse(result.shouldDie);
// A second request for the same JVM is told to die.
result=listener.getTask(context);
assertNotNull(result);
assertTrue(result.shouldDie);
// After unregistration the JVM is told to die as well.
listener.unregister(attemptID,wid);
result=listener.getTask(context);
assertNotNull(result);
assertTrue(result.shouldDie);
listener.stop();
// JVMId.forName: a well-formed id parses, a malformed one throws.
JVMId jvmid=JVMId.forName("jvm_001_002_m_004");
assertNotNull(jvmid);
try {
JVMId.forName("jvm_001_002_m_004_006");
fail();
}
catch ( IllegalArgumentException e) {
assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed",e.getMessage());
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that statusUpdate() reports the task as found and only notifies the
 * heartbeat handler's progressing() when a real TaskStatus is supplied: a
 * null status must not count as progress.
 */
@SuppressWarnings("rawtypes") @Test public void testStatusUpdateProgress() throws IOException, InterruptedException {
AppContext appCtx=mock(AppContext.class);
JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
Dispatcher dispatcher=mock(Dispatcher.class);
EventHandler ea=mock(EventHandler.class);
when(dispatcher.getEventHandler()).thenReturn(ea);
when(appCtx.getEventHandler()).thenReturn(ea);
CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
policy.init(appCtx);
MockTaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,hbHandler,policy);
Configuration conf=new Configuration();
listener.init(conf);
listener.start();
JVMId id=new JVMId("foo",1,true,1);
WrappedJvmID wid=new WrappedJvmID(id.getJobId(),id.isMap,id.getId());
TaskAttemptID attemptID=new TaskAttemptID("1",1,TaskType.MAP,1,1);
TaskAttemptId attemptId=TypeConverter.toYarn(attemptID);
Task task=mock(Task.class);
listener.registerPendingTask(task,wid);
listener.registerLaunchedTask(attemptId,wid);
verify(hbHandler).register(attemptId);
// Null status: the task is found but no progress must be recorded.
AMFeedback feedback=listener.statusUpdate(attemptID,null);
assertTrue(feedback.getTaskFound());
verify(hbHandler,never()).progressing(eq(attemptId));
// A real status: progress must now be recorded.
MapTaskStatus mockStatus=new MapTaskStatus(attemptID,0.0f,1,TaskStatus.State.RUNNING,"","RUNNING","",TaskStatus.Phase.MAP,new Counters());
feedback=listener.statusUpdate(attemptID,mockStatus);
assertTrue(feedback.getTaskFound());
verify(hbHandler).progressing(eq(attemptId));
listener.close();
}
InternalCallVerifier BooleanVerifier
/**
 * Tests the commit window: canCommit() is refused while no recent RM
 * heartbeat has been recorded (without consulting the task at all), and is
 * delegated to Task.canCommit once the last heartbeat time is current.
 */
@Test(timeout=10000) public void testCommitWindow() throws IOException {
SystemClock clock=new SystemClock();
org.apache.hadoop.mapreduce.v2.app.job.Task mockTask=mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
// The task itself always approves the commit.
when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
Job mockJob=mock(Job.class);
when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
AppContext appCtx=mock(AppContext.class);
when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
when(appCtx.getClock()).thenReturn(clock);
JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
final TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
Dispatcher dispatcher=mock(Dispatcher.class);
EventHandler ea=mock(EventHandler.class);
when(dispatcher.getEventHandler()).thenReturn(ea);
when(appCtx.getEventHandler()).thenReturn(ea);
CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
policy.init(appCtx);
// Listener whose heartbeat handler is replaced with the mock above.
TaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,policy){
@Override protected void registerHeartbeatHandler( Configuration conf){
taskHeartbeatHandler=hbHandler;
}
}
;
Configuration conf=new Configuration();
listener.init(conf);
listener.start();
TaskAttemptID tid=new TaskAttemptID("12345",1,TaskType.REDUCE,1,0);
// No recent RM heartbeat: commit refused, task never consulted.
boolean canCommit=listener.canCommit(tid);
assertFalse(canCommit);
verify(mockTask,never()).canCommit(any(TaskAttemptId.class));
// Fresh heartbeat: the request is delegated to the (approving) task.
when(rmHeartbeatHandler.getLastHeartbeatTime()).thenReturn(clock.getTime());
canCommit=listener.canCommit(tid);
assertTrue(canCommit);
verify(mockTask,times(1)).canCommit(any(TaskAttemptId.class));
listener.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests TaskLog behavior when YARN_APP_CONTAINER_LOG_DIR is not set:
 * getMRv2LogDir() must be null and the stdout log path still ends with
 * "stdout" (falling back to the default log directory).
 * Fix: replaced assertEquals(actual, null) with assertNull — clearer intent
 * and a correct expected/actual ordering.
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLogWithoutTaskLogDir() throws IOException {
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  assertNull(TaskLog.getMRv2LogDir());
  // Mocked attempt id; only its JobID and string form are used by TaskLog.
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests TaskLog paths when YARN_APP_CONTAINER_LOG_DIR is set: the MRv2 log
 * dir is picked up from the property, log/index files land under it, and
 * syncLogs/readTaskLog work against a freshly created index file.
 * Fix: assertEquals arguments were reversed (actual, expected); now in the
 * documented (expected, actual) order.
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLog() throws IOException {
  System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
  assertEquals("testString", TaskLog.getMRv2LogDir());
  // Mocked attempt id; only its JobID and string form are used by TaskLog.
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("testString" + File.separatorChar + "stdout"));
  // Recreate the index file so syncLogs operates on a known-empty file.
  File indexFile = TaskLog.getIndexFile(taid, true);
  if (!indexFile.getParentFile().exists()) {
    indexFile.getParentFile().mkdirs();
  }
  indexFile.delete();
  indexFile.createNewFile();
  TaskLog.syncLogs("location", taid, true);
  assertTrue(indexFile.getAbsolutePath().endsWith("userlogs" + File.separatorChar + "job_job_0001"+ File.separatorChar+ "JobId.cleanup"+ File.separatorChar+ "log.index"));
  f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
  if (f != null) {
    assertTrue(f.getAbsolutePath().endsWith("location" + File.separatorChar + "debugout"));
    FileUtils.copyFile(indexFile, f);
  }
  assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
  assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests TaskLogAppender: task id and log size are picked up from system
 * properties on activateOptions(), events are written through the
 * configured writer/layout, and the cleanup flag round-trips.
 * Fix: assertEquals arguments were reversed, and boolean comparisons
 * against true/false are now assertTrue/assertFalse.
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testTaskLogAppender(){
  TaskLogAppender appender = new TaskLogAppender();
  System.setProperty(TaskLogAppender.TASKID_PROPERTY, "attempt_01_02_m03_04_001");
  System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
  appender.activateOptions();
  assertEquals("attempt_01_02_m03_04_001", appender.getTaskId());
  // NOTE(review): the property is "1003" but the original expectation is
  // 1000 — presumably the appender normalizes the size; confirm against
  // TaskLogAppender before changing this value.
  assertEquals(1000, appender.getTotalLogFileSize());
  assertFalse(appender.getIsCleanup());
  // Route output through an in-memory writer and append one event.
  Writer writer = new StringWriter();
  appender.setWriter(writer);
  Layout layout = new PatternLayout("%-5p [%t]: %m%n");
  appender.setLayout(layout);
  Category logger = Logger.getLogger(getClass().getName());
  LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
  appender.append(event);
  appender.flush();
  appender.close();
  assertTrue(writer.toString().length() > 0);
  // The cleanup flag must survive activateOptions().
  appender = new TaskLogAppender();
  appender.setIsCleanup(true);
  appender.activateOptions();
  assertTrue(appender.getIsCleanup());
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end test of TextInputFormat: for files of growing length, writes
 * one integer per line, splits the input a random number of ways, and
 * verifies via a BitSet that every key is read exactly once across all
 * splits.
 * @throws Exception
 */
@Test(timeout=500000) public void testFormat() throws Exception {
JobConf job=new JobConf(defaultConf);
Path file=new Path(workDir,"test.txt");
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced.
int seed=new Random().nextInt();
LOG.info("seed = " + seed);
Random random=new Random(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// Write one integer per line: 0 .. length-1.
Writer writer=new OutputStreamWriter(localFs.create(file));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(job);
LongWritable key=new LongWritable();
Text value=new Text();
// Three rounds per file length, each with a random split count.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.debug("splitting: got = " + splits.length);
if (length == 0) {
assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
assertEquals("Empty file length == 0",0,splits[0].getLength());
}
// bits tracks which values have been read; a repeat means a record
// landed in more than one split.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],job,reporter);
try {
int count=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count);
}
finally {
reader.close();
}
}
// Every value 0 .. length-1 must have been seen exactly once.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Tests that LineRecordReader enforces the configured maximum line length:
 * reading from an endless stream of zero bytes (no newline) must terminate
 * without producing a line, instead of buffering without bound.
 */
@Test(timeout=5000) public void testMRMaxLine() throws Exception {
final int MAXPOS=1024 * 1024;
final int MAXLINE=10 * 1024;
final int BUF=64 * 1024;
// Endless stream of '\0' bytes that fails the test if the reader pulls
// more than MAXPOS plus one buffer's worth of data.
final InputStream infNull=new InputStream(){
// Total number of bytes handed out so far.
int position=0;
final int MAXPOSBUF=1024 * 1024 + BUF;
@Override public int read(){
++position;
return 0;
}
@Override public int read( byte[] b){
// Guard: the reader must stop before exceeding MAXPOS + BUF.
assertTrue("Read too many bytes from the stream",position < MAXPOSBUF);
Arrays.fill(b,(byte)0);
position+=b.length;
return b.length;
}
// Rewind the byte counter so the stream can be reused below.
public void reset(){
position=0;
}
}
;
final LongWritable key=new LongWritable();
final Text val=new Text();
LOG.info("Reading a line from /dev/null");
final Configuration conf=new Configuration(false);
conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,MAXLINE);
conf.setInt("io.file.buffer.size",BUF);
// First via the Configuration-driven constructor...
LineRecordReader lrr=new LineRecordReader(infNull,0,MAXPOS,conf);
assertFalse("Read a line from null",lrr.next(key,val));
infNull.reset();
// ...then via the explicit max-line-length constructor.
lrr=new LineRecordReader(infNull,0L,MAXLINE,MAXPOS);
assertFalse("Read a line from null",lrr.next(key,val));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a splittable compression codec (BZip2) works with
 * {@code TextInputFormat}: for several data sizes and random split
 * counts, every key must appear in exactly one split.
 */
@Test(timeout=900000) public void testSplitableCodecs() throws IOException {
JobConf conf=new JobConf(defaultConf);
int seed=new Random().nextInt();
CompressionCodec codec=null;
// Load the BZip2 codec reflectively; fail fast if it is unavailable.
try {
codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf);
}
catch ( ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file=new Path(workDir,"test" + codec.getDefaultExtension());
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced.
LOG.info("seed = " + seed);
Random random=new Random(seed);
FileSystem localFs=FileSystem.getLocal(conf);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(conf,workDir);
final int MAX_LENGTH=500000;
for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) {
LOG.info("creating; entries = " + length);
// Write one integer per line, compressed with the codec under test.
Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(conf);
LongWritable key=new LongWritable();
Text value=new Text();
// Re-read with a few random split counts and check complete,
// non-overlapping coverage of all keys.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(conf,numSplits);
LOG.info("splitting: got = " + splits.length);
// One bit per expected key; a set bit seen twice means overlapping splits.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],conf,reporter);
try {
int counter=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
counter++;
}
if (counter > 0) {
LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
else {
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
}
finally {
reader.close();
}
}
// Every key 0..length-1 must have been seen in exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * OutputLogFilter must reject log paths while accepting both the
 * succeeded-marker paths and the regular output paths.
 */
@Test public void testLogFilter(){
  final PathFilter logFilter=new Utils.OutputFileUtils.OutputLogFilter();
  // Log files are filtered out.
  for ( Path logPath : LOG_PATHS) {
    assertFalse(logFilter.accept(logPath));
  }
  // Success markers pass through.
  for ( Path succeededPath : SUCCEEDED_PATHS) {
    assertTrue(logFilter.accept(succeededPath));
  }
  // Regular output files pass through.
  for ( Path outputPath : PASS_PATHS) {
    assertTrue(logFilter.accept(outputPath));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * OutputFilesFilter must reject both log paths and succeeded-marker
 * paths, accepting only the regular output paths.
 */
@Test public void testOutputFilesFilter(){
  final PathFilter outputFilter=new Utils.OutputFileUtils.OutputFilesFilter();
  // Log files are filtered out.
  for ( Path logPath : LOG_PATHS) {
    assertFalse(outputFilter.accept(logPath));
  }
  // Unlike OutputLogFilter, success markers are filtered out too.
  for ( Path succeededPath : SUCCEEDED_PATHS) {
    assertFalse(outputFilter.accept(succeededPath));
  }
  // Regular output files pass through.
  for ( Path outputPath : PASS_PATHS) {
    assertTrue(outputFilter.accept(outputPath));
  }
}
BooleanVerifier
/**
 * Verifies that each {@link ResourceMgrDelegate} convenience method is
 * backed by exactly one call to the corresponding
 * {@link ApplicationClientProtocol} RPC on the mocked RM client.
 */
@Test(timeout=20000) public void testResourceMgrDelegate() throws Exception {
final ApplicationClientProtocol clientRMProtocol=mock(ApplicationClientProtocol.class);
// Inject the mocked RM protocol into the YarnClient during service start.
ResourceMgrDelegate delegate=new ResourceMgrDelegate(conf){
@Override protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl)this.client).setRMClient(clientRMProtocol);
}
}
;
// killApplication -> forceKillApplication
when(clientRMProtocol.forceKillApplication(any(KillApplicationRequest.class))).thenReturn(KillApplicationResponse.newInstance(true));
delegate.killApplication(appId);
verify(clientRMProtocol).forceKillApplication(any(KillApplicationRequest.class));
// getAllJobs -> getApplications
when(clientRMProtocol.getApplications(any(GetApplicationsRequest.class))).thenReturn(recordFactory.newRecordInstance(GetApplicationsResponse.class));
delegate.getAllJobs();
verify(clientRMProtocol).getApplications(any(GetApplicationsRequest.class));
// getApplicationReport -> getApplicationReport
when(clientRMProtocol.getApplicationReport(any(GetApplicationReportRequest.class))).thenReturn(recordFactory.newRecordInstance(GetApplicationReportResponse.class));
delegate.getApplicationReport(appId);
verify(clientRMProtocol).getApplicationReport(any(GetApplicationReportRequest.class));
// getClusterMetrics -> getClusterMetrics
GetClusterMetricsResponse clusterMetricsResponse=recordFactory.newRecordInstance(GetClusterMetricsResponse.class);
clusterMetricsResponse.setClusterMetrics(recordFactory.newRecordInstance(YarnClusterMetrics.class));
when(clientRMProtocol.getClusterMetrics(any(GetClusterMetricsRequest.class))).thenReturn(clusterMetricsResponse);
delegate.getClusterMetrics();
verify(clientRMProtocol).getClusterMetrics(any(GetClusterMetricsRequest.class));
// getActiveTrackers -> getClusterNodes
when(clientRMProtocol.getClusterNodes(any(GetClusterNodesRequest.class))).thenReturn(recordFactory.newRecordInstance(GetClusterNodesResponse.class));
delegate.getActiveTrackers();
verify(clientRMProtocol).getClusterNodes(any(GetClusterNodesRequest.class));
// getNewJobID -> getNewApplication
GetNewApplicationResponse newAppResponse=recordFactory.newRecordInstance(GetNewApplicationResponse.class);
newAppResponse.setApplicationId(appId);
when(clientRMProtocol.getNewApplication(any(GetNewApplicationRequest.class))).thenReturn(newAppResponse);
delegate.getNewJobID();
verify(clientRMProtocol).getNewApplication(any(GetNewApplicationRequest.class));
// getQueues -> getQueueInfo
GetQueueInfoResponse queueInfoResponse=recordFactory.newRecordInstance(GetQueueInfoResponse.class);
queueInfoResponse.setQueueInfo(recordFactory.newRecordInstance(QueueInfo.class));
when(clientRMProtocol.getQueueInfo(any(GetQueueInfoRequest.class))).thenReturn(queueInfoResponse);
delegate.getQueues();
verify(clientRMProtocol).getQueueInfo(any(GetQueueInfoRequest.class));
// getQueueAclsForCurrentUser -> getQueueUserAcls
GetQueueUserAclsInfoResponse aclResponse=recordFactory.newRecordInstance(GetQueueUserAclsInfoResponse.class);
when(clientRMProtocol.getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class))).thenReturn(aclResponse);
delegate.getQueueAclsForCurrentUser();
verify(clientRMProtocol).getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class));
}
BooleanVerifier
/**
 * Submitting a job whose application report comes back FAILED must
 * surface the RM diagnostics to the caller as an {@link IOException}.
 * The test now fails explicitly when no exception is thrown (the
 * original silently passed in that case) and closes the config stream
 * even if writing fails.
 */
@Test(timeout=20000) public void testJobSubmissionFailure() throws Exception {
when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class))).thenReturn(appId);
// Report the application as FAILED with known diagnostics.
ApplicationReport report=mock(ApplicationReport.class);
when(report.getApplicationId()).thenReturn(appId);
when(report.getDiagnostics()).thenReturn(failString);
when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.FAILED);
when(resourceMgrDelegate.getApplicationReport(appId)).thenReturn(report);
Credentials credentials=new Credentials();
File jobxml=new File(testWorkDir,"job.xml");
// Write the job configuration; close the stream even if writeXml fails.
OutputStream out=new FileOutputStream(jobxml);
try {
conf.writeXml(out);
}
finally {
out.close();
}
try {
yarnRunner.submitJob(jobId,testWorkDir.getAbsolutePath().toString(),credentials);
// A FAILED application state must be reported as a submission error.
fail("Job submission against a FAILED application should have thrown an IOException");
}
catch ( IOException io) {
LOG.info("Logging exception:",io);
// The RM diagnostics must be propagated in the exception message.
assertTrue(io.getLocalizedMessage().contains(failString));
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Verifies that configuring {@code -Djava.library.path} in either the
 * AM admin or the AM user command opts triggers the corresponding
 * deprecation warning in the YARNRunner log output. The capture
 * appender is now detached in a finally block so it cannot leak
 * captured log output into other tests.
 */
@Test(timeout=20000) public void testWarnCommandOpts() throws Exception {
Logger logger=Logger.getLogger(YARNRunner.class);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
Layout layout=new SimpleLayout();
Appender appender=new WriterAppender(layout,bout);
logger.addAppender(appender);
try {
JobConf jobConf=new JobConf();
// Both opt strings contain -Djava.library.path and must be warned about.
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,"-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS,"-Xmx1024m -Djava.library.path=bar");
YARNRunner yarnRunner=new YARNRunner(jobConf);
@SuppressWarnings("unused") ApplicationSubmissionContext submissionContext=buildSubmitContext(yarnRunner,jobConf);
}
finally {
// Detach the capturing appender so it does not leak into other tests.
logger.removeAppender(appender);
}
String logMsg=bout.toString();
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + "longer function if hadoop native libraries are used. These values "+ "should be set as part of the LD_LIBRARY_PATH in the app master JVM "+ "env using yarn.app.mapreduce.am.admin.user.env config settings."));
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + "function if hadoop native libraries are used. These values should "+ "be set as part of the LD_LIBRARY_PATH in the app master JVM env "+ "using yarn.app.mapreduce.am.env config settings."));
}
APIUtilityVerifier BranchVerifier BooleanVerifier
/**
 * Verifies that both the AM admin command opts and the AM user command
 * opts appear in the container launch command, and that the admin opts
 * come first so that user settings can override them.
 */
@Test(timeout=20000) public void testAMAdminCommandOpts() throws Exception {
JobConf jobConf=new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,"-Djava.net.preferIPv4Stack=true");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS,"-Xmx1024m");
YARNRunner yarnRunner=new YARNRunner(jobConf);
ApplicationSubmissionContext submissionContext=buildSubmitContext(yarnRunner,jobConf);
ContainerLaunchContext containerSpec=submissionContext.getAMContainerSpec();
// Use the parameterized type instead of a raw List.
List<String> commands=containerSpec.getCommands();
int index=0;
int adminIndex=0;
int adminPos=-1;
int userIndex=0;
int userPos=-1;
// Locate each opt: which command string it is in, and where within it.
for ( String command : commands) {
if (command != null) {
adminPos=command.indexOf("-Djava.net.preferIPv4Stack=true");
if (adminPos >= 0) adminIndex=index;
userPos=command.indexOf("-Xmx1024m");
if (userPos >= 0) userIndex=index;
}
index++;
}
// NOTE(review): `> 0` assumes the opts never start a command string.
assertTrue("AM admin command opts not in the commands.",adminPos > 0);
assertTrue("AM user command opts not in the commands.",userPos > 0);
if (adminIndex == userIndex) {
// Same command string: compare character offsets.
assertTrue("AM admin command opts is after user command opts.",adminPos < userPos);
}
else {
// Different command strings: compare command indices.
assertTrue("AM admin command opts is after user command opts.",adminIndex < userIndex);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that
 * {@link CompressionEmulationUtil#configureCompressionEmulation(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.mapred.JobConf)}
 * carries every compression-related setting from the simulated job's
 * configuration over to the gridmix job's configuration — both when
 * compression is disabled and when it is enabled (including input
 * compression detection via a {@code .gz} input path).
 */
@Test public void testExtractCompressionConfigs(){
  JobConf simulated=new JobConf();
  JobConf extracted=new JobConf();
  // --- Case 1: compression disabled in the simulated job. ---
  simulated.setBoolean(FileOutputFormat.COMPRESS,false);
  simulated.set(FileOutputFormat.COMPRESS_CODEC,"MyDefaultCodec");
  simulated.set(FileOutputFormat.COMPRESS_TYPE,"MyDefaultType");
  simulated.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false);
  simulated.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyDefaultCodec2");
  CompressionEmulationUtil.configureCompressionEmulation(simulated,extracted);
  assertFalse(extracted.getBoolean(FileOutputFormat.COMPRESS,true));
  assertEquals("MyDefaultCodec",extracted.get(FileOutputFormat.COMPRESS_CODEC));
  assertEquals("MyDefaultType",extracted.get(FileOutputFormat.COMPRESS_TYPE));
  assertFalse(extracted.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true));
  assertEquals("MyDefaultCodec2",extracted.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
  assertFalse(CompressionEmulationUtil.isInputCompressionEmulationEnabled(extracted));
  // --- Case 2: compression enabled, with a compressed (.gz) input. ---
  simulated.setBoolean(FileOutputFormat.COMPRESS,true);
  simulated.set(FileOutputFormat.COMPRESS_CODEC,"MyCodec");
  simulated.set(FileOutputFormat.COMPRESS_TYPE,"MyType");
  simulated.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
  simulated.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyCodec2");
  org.apache.hadoop.mapred.FileInputFormat.setInputPaths(simulated,"file.gz");
  extracted=new JobConf();
  CompressionEmulationUtil.configureCompressionEmulation(simulated,extracted);
  assertTrue(extracted.getBoolean(FileOutputFormat.COMPRESS,false));
  assertEquals("MyCodec",extracted.get(FileOutputFormat.COMPRESS_CODEC));
  assertEquals("MyType",extracted.get(FileOutputFormat.COMPRESS_TYPE));
  assertTrue(extracted.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false));
  assertEquals("MyCodec2",extracted.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
  assertTrue(CompressionEmulationUtil.isInputCompressionEmulationEnabled(extracted));
}
BooleanVerifier
/**
 * Test compression ratio configuration with multiple compression
 * ratios: values inside [0.07, 0.68] (and the 0F "unset" marker) are
 * accepted; out-of-range values must be rejected. The two duplicated
 * try/catch failure checks are factored into a private helper.
 */
@Test public void testCompressionRatios() throws Exception {
// Valid ratios must configure without error.
testCompressionRatioConfigure(0F);
testCompressionRatioConfigure(0.2F);
testCompressionRatioConfigure(0.4F);
testCompressionRatioConfigure(0.65F);
testCompressionRatioConfigure(0.682F);
testCompressionRatioConfigure(0.567F);
// Out-of-range ratios must be rejected.
assertCompressionRatioRejected(0.01F,"Compression ratio min value (0.07) check failed!");
assertCompressionRatioRejected(0.7F,"Compression ratio max value (0.68) check failed!");
}
/**
 * Asserts that configuring the given out-of-range compression ratio
 * fails with a {@link RuntimeException}.
 */
private void assertCompressionRatioRejected(float ratio,String message) throws Exception {
boolean failed=false;
try {
testCompressionRatioConfigure(ratio);
}
catch ( RuntimeException re) {
failed=true;
}
assertTrue(message,failed);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link RandomTextDataMapper} via {@link CompressionEmulationUtil}:
 * generate compressed random text data and verify that the total number
 * of word bytes covers the requested data size, overshooting by at most
 * one line. The reader is now closed in a finally block so a parse
 * failure no longer leaks the stream.
 */
@Test public void testRandomCompressedTextDataGenerator() throws Exception {
int wordSize=10;
int listSize=20;
long dataSize=10 * 1024 * 1024;
Configuration conf=new Configuration();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE,listSize);
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE,wordSize);
conf.setLong(GenerateData.GRIDMIX_GEN_BYTES,dataSize);
conf.set("mapreduce.job.hdfs-servers","");
FileSystem lfs=FileSystem.getLocal(conf);
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestRandomCompressedTextDataGenr");
lfs.delete(tempDir,true);
// Run the data-generation job into the temp dir.
runDataGenJob(conf,tempDir);
FileStatus[] files=lfs.listStatus(tempDir,new Utils.OutputFileUtils.OutputFilesFilter());
long size=0;
long maxLineSize=0;
for ( FileStatus status : files) {
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(status.getPath(),conf,0);
BufferedReader reader=new BufferedReader(new InputStreamReader(in));
// Close the reader even if parsing fails (previously leaked on error).
try {
String line=reader.readLine();
if (line != null) {
long lineSize=line.getBytes().length;
if (lineSize > maxLineSize) {
maxLineSize=lineSize;
}
// Count only word bytes, excluding whitespace.
while (line != null) {
for ( String word : line.split("\\s")) {
size+=word.getBytes().length;
}
line=reader.readLine();
}
}
}
finally {
reader.close();
}
}
// Total word bytes must cover dataSize but overshoot by at most one line.
assertTrue(size >= dataSize);
assertTrue(size <= dataSize + maxLineSize);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test compressible {@link GridmixRecord}: a record marked compressible
 * with a given ratio, written through a gzip-compressed stream, must
 * read back with its original size and compress to approximately the
 * requested ratio. Both the output and the decompressed input streams
 * are now closed even on error (the input stream was previously never
 * closed at all).
 */
@Test public void testCompressibleGridmixRecord() throws IOException {
JobConf conf=new JobConf();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
FileSystem lfs=FileSystem.getLocal(conf);
int dataSize=1024 * 1024 * 10;
float ratio=0.357F;
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestPossiblyCompressibleGridmixRecord");
lfs.delete(tempDir,true);
GridmixRecord record=new GridmixRecord(dataSize,0);
record.setCompressibility(true,ratio);
conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true);
Path recordFile=new Path(tempDir,"record");
// Write the record through the (gzip) compressed stream.
OutputStream outStream=CompressionEmulationUtil.getPossiblyCompressedOutputStream(recordFile,conf);
try {
DataOutputStream out=new DataOutputStream(outStream);
record.write(out);
out.close();
}
finally {
outStream.close();
}
Path actualRecordFile=recordFile.suffix(".gz");
long compressedFileSize=lfs.listStatus(actualRecordFile)[0].getLen();
GridmixRecord recordRead=new GridmixRecord();
// Read the record back; close the input stream (previously leaked).
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(actualRecordFile,conf,0);
try {
recordRead.readFields(new DataInputStream(in));
}
finally {
in.close();
}
assertEquals("Record size mismatch in a compressible GridmixRecord",dataSize,recordRead.getSize());
assertTrue("Failed to generate a compressible GridmixRecord",recordRead.getSize() > compressedFileSize);
// The observed compression ratio must match the requested one.
float seenRatio=((float)compressedFileSize) / dataSize;
assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),CompressionEmulationUtil.standardizeCompressionRatio(seenRatio),1.0D);
}
BooleanVerifier
/**
 * Checks the compression-emulation on/off switch: it must default to
 * enabled and then follow whatever
 * {@link CompressionEmulationUtil#setCompressionEmulationEnabled} sets.
 */
@Test public void testIsCompressionEmulationEnabled(){
  final Configuration config=new Configuration();
  // Enabled by default.
  assertTrue(CompressionEmulationUtil.isCompressionEmulationEnabled(config));
  // Explicitly disabled.
  CompressionEmulationUtil.setCompressionEmulationEnabled(config,false);
  assertFalse(CompressionEmulationUtil.isCompressionEmulationEnabled(config));
  // Explicitly re-enabled.
  CompressionEmulationUtil.setCompressionEmulationEnabled(config,true);
  assertTrue(CompressionEmulationUtil.isCompressionEmulationEnabled(config));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load
 * ({@code DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE}):
 * it must default to enabled and honor an explicit {@code false}.
 */
@Test(timeout=2000) public void testDistCacheEmulationConfigurability() throws IOException {
Configuration jobConf=GridmixTestUtils.mrvl.getConfig();
Path ioPath=new Path("testDistCacheEmulationConfigurability").makeQualified(GridmixTestUtils.dfs.getUri(),GridmixTestUtils.dfs.getWorkingDirectory());
FileSystem fs=FileSystem.get(jobConf);
FileSystem.mkdirs(fs,ioPath,new FsPermission((short)0777));
// Default: emulation is enabled.
dce=createDistributedCacheEmulator(jobConf,ioPath,false);
assertTrue("Default configuration of " + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE + " is wrong.",dce.shouldEmulateDistCacheLoad());
// Explicitly disabled via the config key.
jobConf.setBoolean(DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE,false);
dce=createDistributedCacheEmulator(jobConf,ioPath,false);
assertFalse("Disabling of emulation of distributed cache load by setting " + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE + " to false is not working.",dce.shouldEmulateDistCacheLoad());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate GenerateDistCacheData job if it creates dist cache files
 * properly: the map-only job must run with zero reduces, complete
 * successfully, and produce files matching the expected (sorted) sizes.
 * @throws Exception
 */
@Test(timeout=200000) public void testGenerateDistCacheData() throws Exception {
long[] sortedFileSizes=new long[5];
// Prepares the config and fills in the expected dist cache file sizes.
Configuration jobConf=runSetupGenerateDistCacheData(true,sortedFileSizes);
GridmixJob gridmixJob=new GenerateDistCacheData(jobConf);
Job job=gridmixJob.call();
assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",0,job.getNumReduceTasks());
assertTrue("GenerateDistCacheData job failed.",job.waitForCompletion(false));
// Cross-check the generated dist cache files against the expected sizes.
validateDistCacheData(jobConf,sortedFileSizes);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link FilePool}: with a minimum file size configured only
 * files at or above the threshold are visible; with the minimum at 0
 * every file in the pool is returned.
 */
@Test public void testPool() throws Exception {
final Random r=new Random();
final Configuration conf=new Configuration();
// Hide files smaller than 3KB from the pool.
conf.setLong(FilePool.GRIDMIX_MIN_FILE,3 * 1024);
final FilePool pool=new FilePool(conf,base);
pool.refresh();
// NOTE(review): raw ArrayList — element type depends on what
// FilePool.getInputFiles appends; confirm against FilePool.
final ArrayList files=new ArrayList();
// Expected byte total for files >= 3KB; presumably derived from the
// fixture layout created under `base` in setup — confirm.
final int expectedPoolSize=(NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
assertEquals(expectedPoolSize,pool.getInputFiles(Long.MAX_VALUE,files));
assertEquals(NFILES - 4,files.size());
files.clear();
assertEquals(expectedPoolSize,pool.getInputFiles(expectedPoolSize,files));
files.clear();
// A random-sized request may overshoot by less than one file's size.
final long rand=r.nextInt(expectedPoolSize);
assertTrue("Missed: " + rand,(NFILES / 2) * 1024 > rand - pool.getInputFiles(rand,files));
// With no minimum size, every file becomes visible.
conf.setLong(FilePool.GRIDMIX_MIN_FILE,0);
pool.refresh();
files.clear();
assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,pool.getInputFiles(Long.MAX_VALUE,files));
}
APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the serial job factory's reader thread: no jobs may be
 * submitted before the start latch is released, and both jobs from the
 * two-job trace must be submitted once it is.
 */
@Test(timeout=120000) public void testSerialReaderThread() throws Exception {
Configuration conf=new Configuration();
// Trace file containing two jobs.
File fin=new File("src" + File.separator + "test"+ File.separator+ "resources"+ File.separator+ "data"+ File.separator+ "wordcount2.json");
JobStoryProducer jobProducer=new ZombieJobProducer(new Path(fin.getAbsolutePath()),null,conf);
CountDownLatch startFlag=new CountDownLatch(1);
UserResolver resolver=new SubmitterUserResolver();
FakeJobSubmitter submitter=new FakeJobSubmitter();
File ws=new File("target" + File.separator + this.getClass().getName());
if (!ws.exists()) {
Assert.assertTrue(ws.mkdirs());
}
SerialJobFactory jobFactory=new SerialJobFactory(submitter,jobProducer,new Path(ws.getAbsolutePath()),conf,startFlag,resolver);
Path ioPath=new Path(ws.getAbsolutePath());
jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf,ioPath));
Thread test=jobFactory.createReaderThread();
test.start();
// The reader must block on the start latch: nothing submitted yet.
Thread.sleep(1000);
assertEquals(0,submitter.getJobs().size());
startFlag.countDown();
// Drive the factory until the reader thread drains the trace.
while (test.isAlive()) {
Thread.sleep(1000);
jobFactory.update(null);
}
// Both jobs in wordcount2.json must have been submitted.
assertEquals(2,submitter.getJobs().size());
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link SleepReducer}: setup must sleep for the duration carried
 * by the current key and report status while doing so; cleanup must
 * report the total time slept.
 */
@Test(timeout=3000) public void testSleepReducer() throws Exception {
Configuration conf=new Configuration();
conf.setInt(JobContext.NUM_REDUCES,2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
conf.setBoolean(FileOutputFormat.COMPRESS,true);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
TaskAttemptID taskId=new TaskAttemptID();
// Fake plumbing for the reduce context.
RawKeyValueIterator input=new FakeRawKeyValueReducerIterator();
Counter counter=new GenericCounter();
Counter inputValueCounter=new GenericCounter();
RecordWriter output=new LoadRecordReduceWriter();
OutputCommitter committer=new CustomOutputCommitter();
StatusReporter reporter=new DummyReporter();
RawComparator comparator=new FakeRawComparator();
ReduceContext reducecontext=new ReduceContextImpl(conf,taskId,input,counter,inputValueCounter,output,committer,reporter,comparator,GridmixKey.class,NullWritable.class);
org.apache.hadoop.mapreduce.Reducer.Context context=new WrappedReducer().getReducerContext(reducecontext);
SleepReducer test=new SleepReducer();
long start=System.currentTimeMillis();
test.setup(context);
// The sleep duration is taken from the current key's reduce output bytes.
long sleeper=context.getCurrentKey().getReduceOutputBytes();
assertEquals("Sleeping... " + sleeper + " ms left",context.getStatus());
// setup must actually have slept for at least `sleeper` ms.
assertTrue(System.currentTimeMillis() >= (start + sleeper));
test.cleanup(context);
assertEquals("Slept for " + sleeper,context.getStatus());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * GridmixJob equality and ordering are driven by the submission
 * sequence number: jobs with the same sequence compare equal, and a
 * lower sequence sorts first.
 */
@Test(timeout=30000) public void testCompareGridmixJob() throws Exception {
  Configuration conf=new Configuration();
  Path outputRoot=new Path("target");
  JobStory story=mock(JobStory.class);
  when(story.getName()).thenReturn("JobName");
  when(story.getJobConf()).thenReturn(new JobConf(conf));
  UserGroupInformation user=UserGroupInformation.getCurrentUser();
  GridmixJob seqZeroA=new LoadJob(conf,1000L,story,outputRoot,user,0);
  GridmixJob seqZeroB=new LoadJob(conf,1000L,story,outputRoot,user,0);
  GridmixJob seqOneA=new LoadJob(conf,1000L,story,outputRoot,user,1);
  GridmixJob seqOneB=new LoadJob(conf,1000L,story,outputRoot,user,1);
  // Same sequence number: equal, compareTo == 0.
  assertTrue(seqZeroA.equals(seqZeroB));
  assertEquals(0,seqZeroA.compareTo(seqZeroB));
  // Different sequence numbers: unequal, lower sequence sorts first.
  assertFalse(seqZeroA.equals(seqOneA));
  assertEquals(-1,seqZeroA.compareTo(seqOneA));
  assertEquals(-1,seqZeroA.compareTo(seqOneB));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link SleepJob.SleepMapper}: mapping a key whose value points
 * two seconds into the future must block until that time has passed,
 * and cleanup must emit exactly one record.
 */
@SuppressWarnings({"unchecked","rawtypes"}) @Test(timeout=30000) public void testSleepMapper() throws Exception {
SleepJob.SleepMapper test=new SleepJob.SleepMapper();
Configuration conf=new Configuration();
conf.setInt(JobContext.NUM_REDUCES,2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
TaskAttemptID taskId=new TaskAttemptID();
// Fake plumbing for the map context.
FakeRecordLLReader reader=new FakeRecordLLReader();
LoadRecordGkNullWriter writer=new LoadRecordGkNullWriter();
OutputCommitter committer=new CustomOutputCommitter();
StatusReporter reporter=new TaskAttemptContextImpl.DummyReporter();
SleepSplit split=getSleepSplit();
MapContext mapcontext=new MapContextImpl(conf,taskId,reader,writer,committer,reporter,split);
Context context=new WrappedMapper().getMapContext(mapcontext);
long start=System.currentTimeMillis();
LOG.info("start:" + start);
// Key/value point 2 seconds into the future; map() sleeps until then.
LongWritable key=new LongWritable(start + 2000);
LongWritable value=new LongWritable(start + 2000);
test.map(key,value,context);
LOG.info("finish:" + System.currentTimeMillis());
assertTrue(System.currentTimeMillis() >= (start + 2000));
test.cleanup(context);
// Cleanup flushes exactly one record to the writer.
assertEquals(1,writer.getData().size());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link TotalHeapUsageEmulatorPlugin}: verifies the disabled
 * (zero-target) path, rejection of a target beyond the physical memory
 * limit, emulation accuracy under several load/free-ratio settings, and
 * the progress-interval boundaries at which emulation steps fire.
 */
@Test public void testTotalHeapUsageEmulatorPlugin() throws Exception {
Configuration conf=new Configuration();
ResourceCalculatorPlugin monitor=new DummyResourceCalculatorPlugin();
long maxHeapUsage=1024 * TotalHeapUsageEmulatorPlugin.ONE_MB;
conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,maxHeapUsage);
monitor.setConf(conf);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
long targetHeapUsageInMB=200;
FakeProgressive fakeProgress=new FakeProgressive();
FakeHeapUsageEmulatorCore fakeCore=new FakeHeapUsageEmulatorCore();
FakeHeapUsageEmulatorPlugin heapPlugin=new FakeHeapUsageEmulatorPlugin(fakeCore);
// A zero target disables the plugin: emulate() must be a no-op.
ResourceUsageMetrics invalidUsage=TestResourceUsageEmulators.createMetrics(0);
heapPlugin.initialize(conf,invalidUsage,null,null);
int numCallsPre=fakeCore.getNumCalls();
long heapUsagePre=fakeCore.getHeapUsageInMB();
heapPlugin.emulate();
int numCallsPost=fakeCore.getNumCalls();
long heapUsagePost=fakeCore.getHeapUsageInMB();
assertEquals("Disabled heap usage emulation plugin works!",numCallsPre,numCallsPost);
assertEquals("Disabled heap usage emulation plugin works!",heapUsagePre,heapUsagePost);
float progress=heapPlugin.getProgress();
assertEquals("Invalid progress of disabled cumulative heap usage emulation " + "plugin!",1.0f,progress,0f);
// A target larger than the physical memory limit must be rejected.
Boolean failed=null;
invalidUsage=TestResourceUsageEmulators.createMetrics(maxHeapUsage + TotalHeapUsageEmulatorPlugin.ONE_MB);
try {
heapPlugin.initialize(conf,invalidUsage,monitor,null);
failed=false;
}
catch ( Exception e) {
failed=true;
}
assertNotNull("Fail case failure!",failed);
assertTrue("Expected failure!",failed);
// Emulation accuracy under various load/free-ratio combinations.
ResourceUsageMetrics metrics=TestResourceUsageEmulators.createMetrics(targetHeapUsageInMB * TotalHeapUsageEmulatorPlugin.ONE_MB);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.2F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,5);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.5F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,120,2);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.25F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,162,6);
// Boundary behavior around a 25% progress interval: no emulation step
// before 25%, then one step per crossed boundary.
fakeProgress=new FakeProgressive();
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.25F);
heapPlugin.initialize(conf,metrics,monitor,fakeProgress);
fakeCore.resetFake();
long initHeapUsage=fakeCore.getHeapUsageInMB();
long initNumCallsUsage=fakeCore.getNumCalls();
testEmulationBoundary(0F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 0 progress]");
testEmulationBoundary(0.24F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 24% progress]");
testEmulationBoundary(0.25F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB / 4,1,"[op, 25% progress]");
testEmulationBoundary(0.80F,fakeCore,fakeProgress,heapPlugin,(targetHeapUsageInMB * 4) / 5,2,"[op, 80% progress]");
testEmulationBoundary(1F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB,3,"[op, 100% progress]");
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs {@code DebugGridmix.main} with no arguments and verifies the
 * usage text printed to stderr. Bug fix: the original saved
 * {@code System.out} but replaced and "restored" {@code System.err},
 * losing the real stderr and leaving it pointing at the original
 * stdout. The correct stream ({@code System.err}) is now saved and
 * restored.
 */
@Test(timeout=100000) public void testMain() throws Exception {
SecurityManager securityManager=System.getSecurityManager();
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
// Save the original stderr: it is stderr (not stdout) that is replaced.
final PrintStream oldErr=System.err;
System.setErr(out);
ExitUtil.disableSystemExit();
try {
String[] argv=new String[0];
DebugGridmix.main(argv);
}
catch ( ExitUtil.ExitException e) {
// With no args, main must exit via ExitUtil's trapped exit.
assertEquals("ExitException",e.getMessage());
ExitUtil.resetFirstExitException();
}
finally {
System.setErr(oldErr);
System.setSecurityManager(securityManager);
}
// The captured stderr must contain the usage text.
String print=bytes.toString();
assertTrue(print.contains("Usage: gridmix [-generate ] [-users URI] [-Dname=value ...] "));
assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link DataStatistics}: the simple accessors, and
 * {@code GenerateData.publishDataStatistics} for empty, plain and
 * {@code .gz} input directories — publishing must throw when
 * compression emulation is enabled but the input is not compressed.
 */
@Test public void testDataStatistics() throws Exception {
// Accessors on a compressed-data stats object.
DataStatistics stats=new DataStatistics(10,2,true);
assertEquals("Data size mismatch",10,stats.getDataSize());
assertEquals("Num files mismatch",2,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
// Accessors on an uncompressed-data stats object.
stats=new DataStatistics(100,5,false);
assertEquals("Data size mismatch",100,stats.getDataSize());
assertEquals("Num files mismatch",5,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
Configuration conf=new Configuration();
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testDir=new Path(rootTempDir,"testDataStatistics");
FileSystem fs=testDir.getFileSystem(conf);
fs.delete(testDir,true);
Path testInputDir=new Path(testDir,"test");
fs.mkdirs(testInputDir);
// Emulation on + empty (uncompressed) input dir => must throw.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
Boolean failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1024L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// Emulation off + empty dir => zero-valued stats.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
stats=GenerateData.publishDataStatistics(testInputDir,1024L,conf);
assertEquals("Data size mismatch",0,stats.getDataSize());
assertEquals("Num files mismatch",0,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Emulation off + one plain file => its size, one file, uncompressed.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
Path inputDataFile=new Path(testInputDir,"test");
long size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello bye").size();
stats=GenerateData.publishDataStatistics(testInputDir,-1,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Emulation on + plain (non-.gz) file => must throw again.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1234L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// Replace the plain file with a .gz file.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
fs.delete(inputDataFile,false);
inputDataFile=new Path(testInputDir,"test.gz");
size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello").size();
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Emulation on + .gz file => accepted and reported as compressed.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link ExecutionSummarizer}: construction with and without
 * command-line arguments, per-job update accounting, and the fields
 * (trace signature, expected data size, resolver, policy) derived by
 * {@code finalize()}.
 */
@Test @SuppressWarnings({"unchecked","rawtypes"}) public void testExecutionSummarizer() throws IOException {
  Configuration conf = new Configuration();
  ExecutionSummarizer es = new ExecutionSummarizer();
  assertEquals("ExecutionSummarizer init failed",
               Summarizer.NA, es.getCommandLineArgsString());
  // Construction with args should record them verbatim and stamp a start
  // time between 'before construction' and 'now'.
  long startTime = System.currentTimeMillis();
  String[] initArgs = new String[]{"-Xmx20m", "-Dtest.args='test'"};
  es = new ExecutionSummarizer(initArgs);
  assertEquals("ExecutionSummarizer init failed",
               "-Xmx20m -Dtest.args='test'", es.getCommandLineArgsString());
  assertTrue("Start time mismatch", es.getStartTime() >= startTime);
  assertTrue("Start time mismatch",
             es.getStartTime() <= System.currentTimeMillis());
  // update(null) must leave all counters untouched.
  es.update(null);
  assertEquals("ExecutionSummarizer init failed", 0,
               es.getSimulationStartTime());
  testExecutionSummarizer(0, 0, 0, 0, 0, 0, 0, es);
  // start() stamps the simulation start time.
  long simStartTime = System.currentTimeMillis();
  es.start(null);
  assertTrue("Simulation start time mismatch",
             es.getSimulationStartTime() >= simStartTime);
  assertTrue("Simulation start time mismatch",
             es.getSimulationStartTime() <= System.currentTimeMillis());
  // Feed fake per-job stats and verify running totals after each update.
  JobStats stats = generateFakeJobStats(1, 10, true, false);
  es.update(stats);
  testExecutionSummarizer(1, 10, 0, 1, 1, 0, 0, es);
  stats = generateFakeJobStats(5, 1, false, false);
  es.update(stats);
  testExecutionSummarizer(6, 11, 0, 2, 1, 1, 0, es);
  stats = generateFakeJobStats(1, 1, true, true);
  es.update(stats);
  testExecutionSummarizer(7, 12, 0, 3, 1, 1, 1, es);
  stats = generateFakeJobStats(2, 2, false, true);
  es.update(stats);
  testExecutionSummarizer(9, 14, 0, 4, 1, 1, 2, es);
  // finalize() derives trace metadata from an actual (empty) trace file.
  JobFactory factory = new FakeJobFactory(conf);
  factory.numJobsInTrace = 3;
  Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"));
  Path testDir = new Path(rootTempDir, "testGridmixSummary");
  Path testTraceFile = new Path(testDir, "test-trace.json");
  FileSystem fs = FileSystem.getLocal(conf);
  fs.create(testTraceFile).close();
  UserResolver resolver = new RoundRobinUserResolver();
  DataStatistics dataStats = new DataStatistics(100, 2, true);
  String policy = GridmixJobSubmissionPolicy.REPLAY.name();
  conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, policy);
  es.finalize(factory, testTraceFile.toString(), 1024L, resolver, dataStats,
              conf);
  // Message typo fixed ("Mismtach" -> "Mismatch").
  assertEquals("Mismatch in num jobs in trace", 3, es.getNumJobsInTrace());
  String tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid,
               es.getInputTraceSignature());
  Path qPath = fs.makeQualified(testTraceFile);
  assertEquals("Mismatch in trace filename", qPath.toString(),
               es.getInputTraceLocation());
  assertEquals("Mismatch in expected data size", "1 K",
               es.getExpectedDataSize());
  assertEquals("Mismatch in input data statistics",
               ExecutionSummarizer.stringifyDataStatistics(dataStats),
               es.getInputDataStatistics());
  assertEquals("Mismatch in user resolver", resolver.getClass().getName(),
               es.getUserResolver());
  assertEquals("Mismatch in policy", policy, es.getJobSubmissionPolicy());
  // 10 GiB expected data size should render as "10 G".
  es.finalize(factory, testTraceFile.toString(), 1024 * 1024 * 1024 * 10L,
              resolver, dataStats, conf);
  assertEquals("Mismatch in expected data size", "10 G",
               es.getExpectedDataSize());
  // Re-creating the trace file (after a pause so its timestamp changes)
  // must change the trace signature.
  fs.delete(testTraceFile, false);
  try {
    Thread.sleep(1000);
  }
  catch (InterruptedException ie) {
    // ignored: a shorter pause only weakens the signature-change check
  }
  fs.create(testTraceFile).close();
  es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats,
              conf);
  assertEquals("Mismatch in trace data size", Summarizer.NA,
               es.getExpectedDataSize());
  assertFalse("Mismatch in trace signature",
              tid.equals(es.getInputTraceSignature()));
  tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid,
               es.getInputTraceSignature());
  // A different trace file also yields a different signature.
  testTraceFile = new Path(testDir, "test-trace2.json");
  fs.create(testTraceFile).close();
  es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats,
              conf);
  assertFalse("Mismatch in trace signature",
              tid.equals(es.getInputTraceSignature()));
  tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid,
               es.getInputTraceSignature());
  // "-" (stdin trace) has no signature or location.
  es.finalize(factory, "-", 0L, resolver, dataStats, conf);
  assertEquals("Mismatch in trace signature", Summarizer.NA,
               es.getInputTraceSignature());
  assertEquals("Mismatch in trace file location", Summarizer.NA,
               es.getInputTraceLocation());
}
BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests high ram job properties configuration.
 *
 * Covers: emulation disabled (defaults pass through), scaling within the
 * cluster limits, and the four failure modes where a scaled map/reduce
 * memory requirement exceeds a configured limit.
 */
@SuppressWarnings("deprecation") @Test public void testHighRamFeatureEmulation() throws IOException {
  // Disabled emulation: original task memory settings are left untouched.
  Configuration gridmixConf = new Configuration();
  gridmixConf.setBoolean(GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE, false);
  testHighRamConfig(10, 20, 5, 10, MRJobConfig.DEFAULT_MAP_MEMORY_MB,
      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB,
      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, gridmixConf);
  // Emulation with the deprecated VMEM upper limit: values scale in bounds.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
      20 * 1024 * 1024);
  testHighRamConfig(10, 20, 5, 10, 5, 10, 10, 20, gridmixConf);
  // Emulation with per-task memory limits: values scale up to the limits.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 100);
  gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 300);
  testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
  // Failure: scaled map memory exceeds the deprecated VMEM limit.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
      70 * 1024 * 1024);
  assertHighRamConfigFails(gridmixConf,
      "Exception expected for exceeding map memory limit " + "(deprecation)!");
  // Failure: scaled reduce memory exceeds the deprecated VMEM limit.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
      150 * 1024 * 1024);
  assertHighRamConfigFails(gridmixConf,
      "Exception expected for exceeding reduce memory limit "
          + "(deprecation)!");
  // Failure: scaled map memory exceeds the map memory limit.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 70);
  assertHighRamConfigFails(gridmixConf,
      "Exception expected for exceeding map memory limit!");
  // Failure: scaled reduce memory exceeds the reduce memory limit.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 200);
  assertHighRamConfigFails(gridmixConf,
      "Exception expected for exceeding reduce memory limit!");
}

/**
 * Invokes {@code testHighRamConfig} with the standard high-ram arguments
 * and asserts that it throws. {@code message} is the assertion message
 * reported when the call unexpectedly succeeds. (Extracted from four
 * identical try/catch blocks in testHighRamFeatureEmulation.)
 */
private void assertHighRamConfigFails(Configuration gridmixConf,
    String message) {
  Boolean failed = null;
  try {
    testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
    failed = false;
  }
  catch (Exception e) {
    failed = true;
  }
  assertNotNull(message, failed);
  assertTrue(message, failed);
}
InternalCallVerifier BooleanVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate different words given
 * different seeds.
 */
@Test public void testRandomTextDataGeneratorUniqueness(){
  // Same list size (10) and word length (5) but different seeds must
  // produce different word sets. Typed collections replace the raw
  // Set/HashSet of the original.
  RandomTextDataGenerator rtdg1 = new RandomTextDataGenerator(10, 1L, 5);
  Set<String> words1 = new HashSet<String>(rtdg1.getRandomWords());
  RandomTextDataGenerator rtdg2 = new RandomTextDataGenerator(10, 0L, 5);
  Set<String> words2 = new HashSet<String>(rtdg2.getRandomWords());
  // Message fixed: the assertion compares set contents, not sizes.
  assertFalse("Word lists should differ for different seeds",
              words1.equals(words2));
}
InternalCallVerifier BooleanVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate same words given the
 * same list-size, word-length and seed.
 */
@Test public void testRandomTextDataGeneratorRepeatability(){
  // Identical (size, seed, length) parameters must be deterministic.
  // Typed List replaces the raw type of the original.
  RandomTextDataGenerator rtdg1 = new RandomTextDataGenerator(10, 0L, 5);
  List<String> words1 = rtdg1.getRandomWords();
  RandomTextDataGenerator rtdg2 = new RandomTextDataGenerator(10, 0L, 5);
  List<String> words2 = rtdg2.getRandomWords();
  assertTrue("List mismatch", words1.equals(words2));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link LoadJob.ResourceUsageMatcherRunner}.
 *
 * Verifies (via static timestamps recorded by the test plugin) that the
 * runner initializes the configured emulator plugin on construction and
 * invokes its emulation when a matching cycle runs.
 */
@Test @SuppressWarnings("unchecked") public void testResourceUsageMatcherRunner() throws Exception {
Configuration conf=new Configuration();
FakeProgressive progress=new FakeProgressive();
// Wire in a dummy resource calculator and the observable test plugin.
conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,DummyResourceCalculatorPlugin.class,ResourceCalculatorPlugin.class);
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class);
long currentTime=System.currentTimeMillis();
TaskAttemptID id=new TaskAttemptID("test",1,TaskType.MAP,1,1);
StatusReporter reporter=new DummyReporter(progress);
TaskInputOutputContext context=new MapContextImpl(conf,id,null,null,null,reporter,null);
FakeResourceUsageMatcherRunner matcher=new FakeResourceUsageMatcherRunner(context,null);
String identifier=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
// Constructing the runner must have initialized the plugin after
// 'currentTime' was captured.
long initTime=TestResourceUsageEmulatorPlugin.testInitialization(identifier,conf);
assertTrue("ResourceUsageMatcherRunner failed to initialize the" + " configured plugin",initTime > currentTime);
assertEquals("Progress mismatch in ResourceUsageMatcherRunner",0,progress.getProgress(),0D);
// Advance progress and run one matching cycle; the plugin's emulation
// timestamp must then be newer than the re-captured 'currentTime'.
progress.setProgress(0.01f);
currentTime=System.currentTimeMillis();
matcher.test();
long emulateTime=TestResourceUsageEmulatorPlugin.testEmulation(identifier,conf);
assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate" + " the configured plugin",emulateTime > currentTime);
}
InternalCallVerifier BooleanVerifier
/**
 * Test {@link ResourceUsageMatcher}.
 *
 * Verifies that configure() initializes the configured emulator plugins,
 * matchResourceUsage() runs their emulation, and that multiple plugins
 * are processed in configured order.
 */
@Test public void testResourceUsageMatcher() throws Exception {
ResourceUsageMatcher matcher=new ResourceUsageMatcher();
Configuration conf=new Configuration();
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class);
long currentTime=System.currentTimeMillis();
// configure() should initialize the plugin (timestamp newer than
// 'currentTime'); matchResourceUsage() should run its emulation.
matcher.configure(conf,null,null,null);
matcher.matchResourceUsage();
String id=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
long result=TestResourceUsageEmulatorPlugin.testInitialization(id,conf);
assertTrue("Resource usage matcher failed to initialize the configured" + " plugin",result > currentTime);
result=TestResourceUsageEmulatorPlugin.testEmulation(id,conf);
assertTrue("Resource usage matcher failed to load and emulate the" + " configured plugin",result > currentTime);
// With two plugins configured, initialization timestamps must respect
// the configured order (TestCpu before TestOthers).
conf.setStrings(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestCpu.class.getName() + "," + TestOthers.class.getName());
matcher.configure(conf,null,null,null);
long time1=TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID,conf);
long time2=TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,conf);
assertTrue("Resource usage matcher failed to initialize the configured" + " plugins in order",time1 < time2);
matcher.matchResourceUsage();
// NOTE(review): these re-read the *initialization* timestamps after
// matchResourceUsage(); the message talks about loading/running plugins,
// so testEmulation() may have been intended — confirm against upstream.
time1=TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID,conf);
time2=TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,conf);
assertTrue("Resource usage matcher failed to load the configured plugins",time1 < time2);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSubmitterResolver() throws Exception {
  // SubmitterUserResolver maps every job onto the submitting user, so it
  // never requires an explicit target-user list.
  final UserResolver resolver = new SubmitterUserResolver();
  assertFalse(resolver.needsTargetUsersList());
  // Regardless of the input UGI (null here), the resolved target must be
  // the current user.
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  assertEquals(currentUser, resolver.getTargetUgi((UserGroupInformation) null));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a job in WAITING state accepts an additional depending
 * job via {@code addDependingJob}.
 */
@Test(timeout=30000) public void testAddingDependingJob() throws Exception {
  Job job_1 = getCopyJob();
  // Typed list (was raw): Job(JobConf, ArrayList<Job>) expects ArrayList<Job>.
  ArrayList<Job> dependingJobs = new ArrayList<Job>();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  Assert.assertTrue(
      job_1.addDependingJob(new Job(job_1.getJobConf(), dependingJobs)));
}
BooleanVerifier
/**
 * Creates three empty files, wraps them in a {@link CombineFileSplit} and
 * checks that {@link CombineFileRecordReader} reports progress once per
 * child reader and yields no records for the empty inputs.
 */
@SuppressWarnings("unchecked") @Test public void testInitNextRecordReader() throws IOException {
  JobConf conf = new JobConf();
  Path[] paths = new Path[3];
  long[] fileLength = new long[3];
  File[] files = new File[3];
  LongWritable key = new LongWritable(1);
  Text value = new Text();
  try {
    // Create the output directory once (the original re-created it on
    // every loop iteration), then populate three empty test files whose
    // nominal "lengths" are recorded in fileLength.
    File dir = new File(outDir.toString());
    dir.mkdir();
    for (int i = 0; i < 3; i++) {
      fileLength[i] = i;
      files[i] = new File(dir, "testfile" + i);
      FileWriter fileWriter = new FileWriter(files[i]);
      fileWriter.close();
      paths[i] = new Path(outDir + "/testfile" + i);
    }
    CombineFileSplit combineFileSplit =
        new CombineFileSplit(conf, paths, fileLength);
    Reporter reporter = Mockito.mock(Reporter.class);
    CombineFileRecordReader cfrr = new CombineFileRecordReader(conf,
        combineFileSplit, reporter, TextRecordReaderWrapper.class);
    // One progress() call on construction...
    verify(reporter).progress();
    // ...and no records from the empty files.
    Assert.assertFalse(cfrr.next(key, value));
    // One progress() call per initialized child reader (3 in total).
    verify(reporter, times(3)).progress();
  }
  finally {
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test DBRecordReader. This reader should creates keys, values, know about position..
 *
 * Builds a DBRecordReader over a fake JDBC connection and checks its
 * freshly-created key/value objects and initial position.
 */
@SuppressWarnings("unchecked") @Test(timeout=5000) public void testDBRecordReader() throws Exception {
JobConf job=mock(JobConf.class);
DBConfiguration dbConfig=mock(DBConfiguration.class);
String[] fields={"field1","filed2"};
// NOTE(review): the reader's generic type parameters appear to have been
// stripped from this line (rawtypes suppression already present).
@SuppressWarnings("rawtypes") DBRecordReader reader=new DBInputFormat().new DBRecordReader(new DBInputSplit(),NullDBWritable.class,job,DriverForTest.getConnection(),dbConfig,"condition",fields,"table");
// A new key starts at 0; the value is the configured NullDBWritable.
LongWritable key=reader.createKey();
assertEquals(0,key.get());
DBWritable value=reader.createValue();
assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",value.getClass().getName());
// Nothing consumed yet: position is 0 and the fake connection produces
// no records.
assertEquals(0,reader.getPos());
assertFalse(reader.next(key,value));
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test org.apache.hadoop.mapred.pipes.Submitter
 *
 * First run: no arguments, expect a usage dump and an intercepted exit.
 * Second run: a full argument set, expect exit status 0.
 * @throws Exception
 */
@Test public void testSubmitter() throws Exception {
  JobConf conf = new JobConf();
  File[] psw = cleanTokenPasswordFile();
  System.setProperty("test.build.data",
      "target/tmp/build/TEST_SUBMITTER_MAPPER/data");
  conf.set("hadoop.log.dir", "target/tmp");
  // Configure a fully non-Java pipes job.
  Submitter.setIsJavaMapper(conf, false);
  Submitter.setIsJavaReducer(conf, false);
  Submitter.setKeepCommandFile(conf, false);
  Submitter.setIsJavaRecordReader(conf, false);
  Submitter.setIsJavaRecordWriter(conf, false);
  PipesPartitioner partitioner = new PipesPartitioner();
  partitioner.configure(conf);
  Submitter.setJavaPartitioner(conf, partitioner.getClass());
  assertEquals(PipesPartitioner.class, (Submitter.getJavaPartitioner(conf)));
  // No arguments: Submitter.main should print usage and call System.exit
  // (intercepted by ExitUtil).
  SecurityManager securityManager = System.getSecurityManager();
  PrintStream oldps = System.out;
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  ExitUtil.disableSystemExit();
  try {
    System.setOut(new PrintStream(out));
    Submitter.main(new String[0]);
    fail();
  }
  catch (ExitUtil.ExitException e) {
    // NOTE(review): contains("") is always true; retained from original.
    assertTrue(out.toString().contains(""));
    assertTrue(out.toString().contains("bin/hadoop pipes"));
    assertTrue(out.toString().contains("[-input ] // Input directory"));
    assertTrue(out.toString().contains("[-output ] // Output directory"));
    assertTrue(out.toString().contains("[-jar // jar filename"));
    assertTrue(out.toString().contains("[-inputformat ] // InputFormat class"));
    assertTrue(out.toString().contains("[-map ] // Java Map class"));
    assertTrue(out.toString().contains("[-partitioner ] // Java Partitioner"));
    assertTrue(out.toString().contains("[-reduce ] // Java Reduce class"));
    assertTrue(out.toString().contains("[-writer ] // Java RecordWriter"));
    assertTrue(out.toString().contains("[-program ] // executable URI"));
    assertTrue(out.toString().contains("[-reduces ] // number of reduces"));
    assertTrue(out.toString().contains("[-lazyOutput ] // createOutputLazily"));
    assertTrue(out.toString().contains("-conf specify an application configuration file"));
    assertTrue(out.toString().contains("-D use value for given property"));
    assertTrue(out.toString().contains("-fs specify a namenode"));
    assertTrue(out.toString().contains("-jt specify a job tracker"));
    assertTrue(out.toString().contains("-files specify comma separated files to be copied to the map reduce cluster"));
    assertTrue(out.toString().contains("-libjars specify comma separated jar files to include in the classpath."));
    assertTrue(out.toString().contains("-archives specify comma separated archives to be unarchived on the compute machines."));
  }
  finally {
    System.setOut(oldps);
    System.setSecurityManager(securityManager);
    if (psw != null) {
      for (File file : psw) {
        file.deleteOnExit();
      }
    }
  }
  // Full argument set: the job should run and exit with status 0.
  try {
    File fCommand = getFileCommand(null);
    String[] args = new String[22];
    File input = new File(workSpace + File.separator + "input");
    if (!input.exists()) {
      Assert.assertTrue(input.createNewFile());
    }
    File outPut = new File(workSpace + File.separator + "output");
    FileUtil.fullyDelete(outPut);
    args[0] = "-input";
    args[1] = input.getAbsolutePath();
    args[2] = "-output";
    args[3] = outPut.getAbsolutePath();
    args[4] = "-inputformat";
    args[5] = "org.apache.hadoop.mapred.TextInputFormat";
    args[6] = "-map";
    args[7] = "org.apache.hadoop.mapred.lib.IdentityMapper";
    args[8] = "-partitioner";
    args[9] = "org.apache.hadoop.mapred.pipes.PipesPartitioner";
    args[10] = "-reduce";
    args[11] = "org.apache.hadoop.mapred.lib.IdentityReducer";
    args[12] = "-writer";
    args[13] = "org.apache.hadoop.mapred.TextOutputFormat";
    args[14] = "-program";
    args[15] = fCommand.getAbsolutePath();
    args[16] = "-reduces";
    args[17] = "2";
    args[18] = "-lazyOutput";
    args[19] = "lazyOutput";
    args[20] = "-jobconf";
    args[21] = "mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false";
    Submitter.main(args);
    fail();
  }
  catch (ExitUtil.ExitException e) {
    // Fixed: the original had expected/actual reversed.
    assertEquals(0, e.status);
  }
  finally {
    System.setOut(oldps);
    System.setSecurityManager(securityManager);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test PipesMapRunner test the transfer data from reader
 *
 * Runs the pipes map runner against an external stub program and checks,
 * via the captured stdout, that the protocol handshake and the reader's
 * records reached the stub.
 * @throws Exception
 */
@Test public void testRunner() throws Exception {
File[] psw=cleanTokenPasswordFile();
try {
// Reader stub that feeds FloatWritable/NullWritable records.
RecordReader rReader=new ReaderPipesMapRunner();
JobConf conf=new JobConf();
conf.set(Submitter.IS_JAVA_RR,"true");
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
// Collector writing map output through a sequence-file writer.
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
// External "map" program: a stub that echoes what it receives to stdout.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
// A job token must be present before the pipes child is launched.
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
TestTaskReporter reporter=new TestTaskReporter();
PipesMapRunner runner=new PipesMapRunner();
initStdOut(conf);
runner.configure(conf);
runner.run(rReader,output,reporter);
// The stub logs the protocol version, key/value classes, and every
// value it received (0.0 .. 9.0 from the reader stub).
String stdOut=readStdOut(conf);
assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
assertTrue(stdOut.contains("Key class:org.apache.hadoop.io.FloatWritable"));
assertTrue(stdOut.contains("Value class:org.apache.hadoop.io.NullWritable"));
assertTrue(stdOut.contains("value:0.0"));
assertTrue(stdOut.contains("value:9.0"));
}
 finally {
if (psw != null) {
// remove the generated token password files
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* test org.apache.hadoop.mapred.pipes.Application
* test a internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS...
* @throws Throwable
*/
@Test public void testApplication() throws Throwable {
JobConf conf=new JobConf();
RecordReader rReader=new Reader();
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
TestTaskReporter reporter=new TestTaskReporter();
File[] psw=cleanTokenPasswordFile();
try {
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
FakeCollector output=new FakeCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
conf.set(Submitter.PRESERVE_COMMANDFILE,"true");
initStdOut(conf);
Application,Writable,IntWritable,Text> application=new Application,Writable,IntWritable,Text>(conf,rReader,output,reporter,IntWritable.class,Text.class);
application.getDownlink().flush();
application.getDownlink().mapItem(new IntWritable(3),new Text("txt"));
application.getDownlink().flush();
application.waitForFinish();
wr.close();
String stdOut=readStdOut(conf);
assertTrue(stdOut.contains("key:3"));
assertTrue(stdOut.contains("value:txt"));
assertEquals(1.0,reporter.getProgress(),0.01);
assertNotNull(reporter.getCounter("group","name"));
assertEquals(reporter.getStatus(),"PROGRESS");
stdOut=readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile"));
assertEquals(0.55f,rReader.getProgress(),0.001);
application.getDownlink().close();
Entry entry=output.getCollect().entrySet().iterator().next();
assertEquals(123,entry.getKey().get());
assertEquals("value",entry.getValue().toString());
try {
application.abort(new Throwable());
fail();
}
catch ( IOException e) {
assertEquals("pipe child exception",e.getMessage());
}
}
finally {
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test org.apache.hadoop.mapred.pipes.PipesReducer
 * test the transfer of data: key and value
 *
 * Feeds one boolean key with three text values through PipesReducer and
 * checks, via the captured stdout of the external stub, that all of them
 * arrived. (Method name "Reduser" is a historical typo; kept because the
 * test name is the entry point.)
 * @throws Exception
 */
@Test public void testPipesReduser() throws Exception {
File[] psw=cleanTokenPasswordFile();
JobConf conf=new JobConf();
try {
// A job token must be present before the pipes child is launched.
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
// External "reduce" program: a stub that echoes keys/values to stdout.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeReducerStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
PipesReducer reducer=new PipesReducer();
reducer.configure(conf);
BooleanWritable bw=new BooleanWritable(true);
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
initStdOut(conf);
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
Reporter reporter=new TestTaskReporter();
List texts=new ArrayList();
texts.add(new Text("first"));
texts.add(new Text("second"));
texts.add(new Text("third"));
reducer.reduce(bw,texts.iterator(),output,reporter);
reducer.close();
// The stub must have seen the key once and each value exactly as sent.
String stdOut=readStdOut(conf);
assertTrue(stdOut.contains("reducer key :true"));
assertTrue(stdOut.contains("reduce value :first"));
assertTrue(stdOut.contains("reduce value :second"));
assertTrue(stdOut.contains("reduce value :third"));
}
 finally {
if (psw != null) {
// remove the generated token password files
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test PipesNonJavaInputFormat
 *
 * Checks the record reader's initial progress, split generation over two
 * input files, and the PipesDummyRecordReader pass-through behaviour.
 */
@Test public void testFormat() throws IOException {
PipesNonJavaInputFormat inputFormat=new PipesNonJavaInputFormat();
JobConf conf=new JobConf();
Reporter reporter=mock(Reporter.class);
// A fresh reader over a fake split reports zero progress.
RecordReader reader=inputFormat.getRecordReader(new FakeSplit(),conf,reporter);
assertEquals(0.0f,reader.getProgress(),0.001);
// Create two (possibly pre-existing) empty input files.
File input1=new File(workSpace + File.separator + "input1");
if (!input1.getParentFile().exists()) {
Assert.assertTrue(input1.getParentFile().mkdirs());
}
if (!input1.exists()) {
Assert.assertTrue(input1.createNewFile());
}
File input2=new File(workSpace + File.separator + "input2");
if (!input2.exists()) {
Assert.assertTrue(input2.createNewFile());
}
// Two input files should yield two splits.
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,StringUtils.escapeString(input1.getAbsolutePath()) + "," + StringUtils.escapeString(input2.getAbsolutePath()));
InputSplit[] splits=inputFormat.getSplits(conf,2);
assertEquals(2,splits.length);
// The dummy reader produces no keys/values; its "progress" simply
// reflects the float key passed to next() (2.0 below).
PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader=new PipesNonJavaInputFormat.PipesDummyRecordReader(conf,splits[0]);
assertNull(dummyRecordReader.createKey());
assertNull(dummyRecordReader.createValue());
assertEquals(0,dummyRecordReader.getPos());
assertEquals(0.0,dummyRecordReader.getProgress(),0.001);
assertTrue(dummyRecordReader.next(new FloatWritable(2.0f),NullWritable.get()));
assertEquals(2.0,dummyRecordReader.getProgress(),0.001);
dummyRecordReader.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testClusterWithLocalClientProvider() throws Exception {
  Configuration config = new Configuration();
  // An unknown framework name has no matching client protocol provider,
  // so Cluster construction must fail.
  try {
    config.set(MRConfig.FRAMEWORK_NAME, "incorrect");
    new Cluster(config);
    fail("Cluster should not be initialized with incorrect framework name");
  }
  catch (IOException expected) {
    // expected: no provider handles "incorrect"
  }
  // The "local" framework must resolve to the LocalJobRunner client.
  config.set(MRConfig.FRAMEWORK_NAME, "local");
  Cluster localCluster = new Cluster(config);
  assertTrue(localCluster.getClient() instanceof LocalJobRunner);
  localCluster.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies that Job.monitorAndPrintJob logs uber mode, 100% progress and
// the successful-completion message, and that Job.toString reports the
// mocked task counts. Log output is captured via a temporary log4j
// WriterAppender attached to the Job logger.
@Test public void testJobMonitorAndPrint() throws Exception {
// Two statuses: RUNNING first, then SUCCEEDED on the next poll.
JobStatus jobStatus_1=new JobStatus(new JobID("job_000",1),1f,0.1f,0.1f,0f,State.RUNNING,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
JobStatus jobStatus_2=new JobStatus(new JobID("job_000",1),1f,1f,1f,1f,State.SUCCEEDED,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
doAnswer(new Answer(){
@Override public TaskCompletionEvent[] answer( InvocationOnMock invocation) throws Throwable {
// no completion events to report
return new TaskCompletionEvent[0];
}
}
).when(job).getTaskCompletionEvents(anyInt(),anyInt());
// 5 map and 5 reduce task reports back Job.toString's counts below.
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1,jobStatus_2);
// Capture the Job logger's output into an in-memory appender.
Layout layout=Logger.getRootLogger().getAppender("stdout").getLayout();
ByteArrayOutputStream os=new ByteArrayOutputStream();
WriterAppender appender=new WriterAppender(layout,os);
appender.setThreshold(Level.ALL);
Logger qlogger=Logger.getLogger(Job.class);
qlogger.addAppender(appender);
job.monitorAndPrintJob();
qlogger.removeAppender(appender);
// Scan the captured log for the expected markers.
LineNumberReader r=new LineNumberReader(new StringReader(os.toString()));
String line;
boolean foundHundred=false;
boolean foundComplete=false;
boolean foundUber=false;
String uberModeMatch="uber mode : true";
String progressMatch="map 100% reduce 100%";
String completionMatch="completed successfully";
while ((line=r.readLine()) != null) {
if (line.contains(uberModeMatch)) {
foundUber=true;
}
foundHundred=line.contains(progressMatch);
if (foundHundred) break;
}
// NOTE(review): if the 100%-progress line is the last log line, this
// readLine() returns null and the contains() below throws NPE instead
// of a clean assertion failure.
line=r.readLine();
foundComplete=line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
InternalCallVerifier BooleanVerifier
/**
 * Run a test with a misconfigured number of mappers.
 * Expect failure.
 */
@Test public void testInvalidMultiMapParallelism() throws Exception {
  Path inputPath = createMultiMapsInput();
  Path outputPath = getOutputPath();
  // Start from a clean output directory.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  Job job = Job.getInstance();
  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  // A negative max-running-maps setting is invalid and must abort the job.
  LocalJobRunner.setLocalMaxRunningMaps(job, -6);
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);
  boolean succeeded = job.waitForCompletion(true);
  assertFalse("Job succeeded somehow", succeeded);
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for zero mappers: a job whose input format yields no splits
 * must still complete successfully.
 */
@Test public void testEmptyMaps() throws Exception {
  Path outputPath = getOutputPath();
  // Start from a clean output directory.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  Job job = Job.getInstance();
  job.setInputFormatClass(EmptyInputFormat.class);
  job.setNumReduceTasks(1);
  FileOutputFormat.setOutputPath(job, outputPath);
  boolean succeeded = job.waitForCompletion(true);
  assertTrue("Empty job should work", succeeded);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that the GC counter actually increments when we know that we've
 * spent some time in the GC during the mapper.
 */
@Test public void testGcCounter() throws Exception {
  Path inputPath = getInputPath();
  Path outputPath = getOutputPath();
  // Clear both sides from any previous run.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  if (localFs.exists(inputPath)) {
    localFs.delete(inputPath, true);
  }
  createInputFile(inputPath, 0, 20);
  // Map-only job whose mapper deliberately churns the garbage collector.
  Job job = Job.getInstance();
  job.setMapperClass(GCMapper.class);
  job.setNumReduceTasks(0);
  job.getConfiguration().set(MRJobConfig.IO_SORT_MB, "25");
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);
  boolean completed = job.waitForCompletion(true);
  assertTrue("job failed", completed);
  // The GC-heavy mapper must have produced a positive GC-time counter.
  Counter gcCounter = job.getCounters().findCounter(TaskCounter.GC_TIME_MILLIS);
  assertNotNull(gcCounter);
  assertTrue("No time spent in gc", gcCounter.getValue() > 0);
}
InternalCallVerifier BooleanVerifier
/**
 * Submits a trivial job through the runner and verifies it completes
 * successfully.
 */
@Test public void testJobSubmission() throws Exception {
  JobConf conf = new JobConf();
  // Job.getInstance replaces the deprecated Job(Configuration) constructor
  // and matches how the sibling tests in this file create jobs.
  Job job = Job.getInstance(conf);
  job.setInputFormatClass(TestInputFormat.class);
  job.setMapperClass(TestMapper.class);
  job.setOutputFormatClass(TestOutputFormat.class);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
}
BooleanVerifier
// Reducer fails deliberately; every tracked component (mapper, reducer,
// record reader, record writer) must still have its cleanup invoked.
@Test public void testReduceCleanup() throws Exception {
  reset();
  Path inputPath = createInput();
  Path outputPath = getOutputPath();
  // Remove stale output from previous runs.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  Job job = Job.getInstance();
  job.setMapperClass(TrackingTokenizerMapper.class);
  job.setReducerClass(FailingReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  job.setInputFormatClass(TrackingTextInputFormat.class);
  job.setOutputFormatClass(TrackingTextOutputFormat.class);
  job.setNumReduceTasks(1);
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);
  job.waitForCompletion(true);
  Assert.assertTrue(mapCleanup);
  Assert.assertTrue(reduceCleanup);
  Assert.assertTrue(recordReaderCleanup);
  Assert.assertTrue(recordWriterCleanup);
}
BooleanVerifier
// Successful job: every tracked component (mapper, reducer, record
// reader, record writer) must have its cleanup invoked.
@Test public void testJobSuccessCleanup() throws Exception {
  reset();
  Path inputPath = createInput();
  Path outputPath = getOutputPath();
  // Remove stale output from previous runs.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  Job job = Job.getInstance();
  job.setMapperClass(TrackingTokenizerMapper.class);
  job.setReducerClass(TrackingIntSumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  job.setInputFormatClass(TrackingTextInputFormat.class);
  job.setOutputFormatClass(TrackingTextOutputFormat.class);
  job.setNumReduceTasks(1);
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);
  job.waitForCompletion(true);
  Assert.assertTrue(mapCleanup);
  Assert.assertTrue(reduceCleanup);
  Assert.assertTrue(recordReaderCleanup);
  Assert.assertTrue(recordWriterCleanup);
}
BooleanVerifier
// Map-only job whose mapper fails; mapper, record reader and record
// writer cleanups must still run.
@Test public void testMapCleanup() throws Exception {
  reset();
  Path inputPath = createInput();
  Path outputPath = getOutputPath();
  // Remove stale output from previous runs.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputPath)) {
    localFs.delete(outputPath, true);
  }
  Job job = Job.getInstance();
  job.setMapperClass(FailingMapper.class);
  job.setInputFormatClass(TrackingTextInputFormat.class);
  job.setOutputFormatClass(TrackingTextOutputFormat.class);
  job.setNumReduceTasks(0);
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);
  job.waitForCompletion(true);
  Assert.assertTrue(mapCleanup);
  Assert.assertTrue(recordReaderCleanup);
  Assert.assertTrue(recordWriterCleanup);
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testCombiner() throws Exception {
  // End-to-end check that the combiner runs with a combiner-specific
  // grouping comparator and actually reduces the record count:
  // COMBINE_INPUT_RECORDS must exceed COMBINE_OUTPUT_RECORDS, and the
  // reducer output must be the per-group maxima {A2, B5}.
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in = new File(TEST_ROOT_DIR, "input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out = new File(TEST_ROOT_DIR, "output");
  // Fix: close the writer even if a println throws, so the input file
  // handle is not leaked on error paths.
  PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
  try {
    pw.println("A|a,1");
    pw.println("A|b,2");
    pw.println("B|a,3");
    pw.println("B|b,4");
    pw.println("B|c,5");
  } finally {
    pw.close();
  }
  JobConf conf = new JobConf();
  conf.set("mapreduce.framework.name", "local");
  Job job = new Job(conf);
  TextInputFormat.setInputPaths(job, new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setGroupingComparatorClass(GroupComparator.class);
  job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  // Force the combiner to run even with a single spill.
  job.getConfiguration().setInt("min.num.spills.for.combine", 0);
  job.submit();
  job.waitForCompletion(false);
  if (job.isSuccessful()) {
    Counters counters = job.getCounters();
    long combinerInputRecords = counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_INPUT_RECORDS").getValue();
    long combinerOutputRecords = counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_OUTPUT_RECORDS").getValue();
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    // Fix: typed Set instead of raw type, and close the reader even if
    // an assertion fails mid-read.
    Set<String> output = new HashSet<String>();
    BufferedReader br = new BufferedReader(new FileReader(new File(out, "part-r-00000")));
    try {
      String line = br.readLine();
      Assert.assertNotNull(line);
      // Key is the first char ("A"/"B"); the value digit is at column 4.
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNotNull(line);
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNull(line);
    } finally {
      br.close();
    }
    Set<String> expected = new HashSet<String>();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected, output);
  } else {
    Assert.fail("Job failed");
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testPluginAbility(){
try {
JobConf jobConf=new JobConf();
jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,TestShufflePlugin.TestShuffleConsumerPlugin.class,ShuffleConsumerPlugin.class);
ShuffleConsumerPlugin shuffleConsumerPlugin=null;
Class extends ShuffleConsumerPlugin> clazz=jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,Shuffle.class,ShuffleConsumerPlugin.class);
assertNotNull("Unable to get " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,clazz);
shuffleConsumerPlugin=ReflectionUtils.newInstance(clazz,jobConf);
assertNotNull("Unable to load " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,shuffleConsumerPlugin);
}
catch ( Exception e) {
assertTrue("Threw exception:" + e,false);
}
}
BooleanVerifier
@Test public void testConsumerApi(){
JobConf jobConf=new JobConf();
ShuffleConsumerPlugin shuffleConsumerPlugin=new TestShuffleConsumerPlugin();
ReduceTask mockReduceTask=mock(ReduceTask.class);
TaskUmbilicalProtocol mockUmbilical=mock(TaskUmbilicalProtocol.class);
Reporter mockReporter=mock(Reporter.class);
FileSystem mockFileSystem=mock(FileSystem.class);
Class extends org.apache.hadoop.mapred.Reducer> combinerClass=jobConf.getCombinerClass();
@SuppressWarnings("unchecked") CombineOutputCollector mockCombineOutputCollector=(CombineOutputCollector)mock(CombineOutputCollector.class);
org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID=mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
LocalDirAllocator mockLocalDirAllocator=mock(LocalDirAllocator.class);
CompressionCodec mockCompressionCodec=mock(CompressionCodec.class);
Counter mockCounter=mock(Counter.class);
TaskStatus mockTaskStatus=mock(TaskStatus.class);
Progress mockProgress=mock(Progress.class);
MapOutputFile mockMapOutputFile=mock(MapOutputFile.class);
Task mockTask=mock(Task.class);
try {
String[] dirs=jobConf.getLocalDirs();
ShuffleConsumerPlugin.Context context=new ShuffleConsumerPlugin.Context(mockTaskAttemptID,jobConf,mockFileSystem,mockUmbilical,mockLocalDirAllocator,mockReporter,mockCompressionCodec,combinerClass,mockCombineOutputCollector,mockCounter,mockCounter,mockCounter,mockCounter,mockCounter,mockCounter,mockTaskStatus,mockProgress,mockProgress,mockTask,mockMapOutputFile,null);
shuffleConsumerPlugin.init(context);
shuffleConsumerPlugin.run();
shuffleConsumerPlugin.close();
}
catch ( Exception e) {
assertTrue("Threw exception:" + e,false);
}
mockReduceTask.getTaskID();
mockReduceTask.getJobID();
mockReduceTask.getNumMaps();
mockReduceTask.getPartition();
mockReporter.progress();
}
BooleanVerifier
@Test public void testProviderApi() {
  // API-surface check: the LocalDirAllocator call used by shuffle
  // providers can be invoked against mocks without throwing.
  LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
  JobConf mockJobConf = mock(JobConf.class);
  try {
    mockLocalDirAllocator.getLocalPathToRead("", mockJobConf);
  } catch (Exception e) {
    // fail() is the idiomatic replacement for assertTrue(msg, false).
    fail("Threw exception:" + e);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier IgnoredMethod HybridVerifier
/**
 * Tests that a status string set through {@code context.setStatus()} in the
 * mapper shows up in the map task's report, then runs a map+reduce copy job
 * with restricted attempts.
 * TODO fix testcase (currently disabled with @Ignore)
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test @Ignore public void testContextStatus() throws IOException, InterruptedException, ClassNotFoundException {
Path test=new Path(testRootTempDir,"testContextStatus");
// Phase 1: map-only job (0 reduces) whose mapper sets a custom status.
int numMaps=1;
Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,0);
job.setMapperClass(MyMapper.class);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
TaskReport[] reports=job.getTaskReports(TaskType.MAP);
assertEquals(numMaps,reports.length);
// The reported task state must equal the status string (myStatus) that
// MyMapper is expected to have set via context.setStatus().
assertEquals(myStatus,reports[0].getState());
// Phase 2: map+reduce data-copy job; a single map attempt and zero reduce
// retries so any failure surfaces immediately rather than being retried.
int numReduces=1;
job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,numReduces);
job.setMapperClass(DataCopyMapper.class);
job.setReducerClass(DataCopyReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(0);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a one-map/one-reduce job whose tasks check that
 * {@code context.getProgress()} advances; the job fails if progress
 * is not reported, so asserting success is sufficient.
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test public void testReduceContextProgress() throws IOException, InterruptedException, ClassNotFoundException {
  final int numTasks = 1;
  Path testDir = new Path(testRootTempDir, "testReduceContextProgress");
  Job job = MapReduceTestUtil.createJob(createJobConf(), new Path(testDir, "in"), new Path(testDir, "out"), numTasks, numTasks, INPUT);
  job.setMapperClass(ProgressCheckerMapper.class);
  job.setReducerClass(ProgressCheckerReducer.class);
  job.setMapOutputKeyClass(Text.class);
  // One attempt each: a retry would mask a progress-reporting failure.
  job.setMaxMapAttempts(1);
  job.setMaxReduceAttempts(1);
  job.waitForCompletion(true);
  assertTrue("Job failed", job.isSuccessful());
}
InternalCallVerifier BooleanVerifier
// Verifies ClientProtocolProvider selection: with no framework configured,
// Cluster must fall back to a LocalJobRunner client; with the YARN
// framework name set, the client (when one can be created) is a YARNRunner.
@Test public void testClusterWithYarnClientProtocolProvider() throws Exception {
Configuration conf=new Configuration(false);
Cluster cluster=null;
try {
cluster=new Cluster(conf);
}
 catch ( Exception e) {
// Wrap with context: an empty conf must still yield a usable local cluster.
throw new Exception("Failed to initialize a local runner w/o a cluster framework key",e);
}
try {
assertTrue("client is not a LocalJobRunner",cluster.getClient() instanceof LocalJobRunner);
}
  finally {
if (cluster != null) {
cluster.close();
}
}
try {
conf=new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
cluster=new Cluster(conf);
ClientProtocol client=cluster.getClient();
assertTrue("client is a YARNRunner",client instanceof YARNRunner);
}
 catch ( IOException e) {
// Intentionally ignored: constructing a YARN-backed Cluster can fail in
// this environment (no RM running) — NOTE(review): confirm this
// best-effort swallow is still the desired behavior.
}
  finally {
if (cluster != null) {
cluster.close();
}
}
}
InternalCallVerifier BooleanVerifier
// Verifies that Cluster.getDelegationToken() forwards to the RM protocol:
// a mocked ApplicationClientProtocol returns a canned RM delegation token,
// and the token handed back by the cluster must carry the same kind.
@Test public void testClusterGetDelegationToken() throws Exception {
Configuration conf=new Configuration(false);
Cluster cluster=null;
try {
conf=new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
cluster=new Cluster(conf);
YARNRunner yrunner=(YARNRunner)cluster.getClient();
// Build the canned response the mocked RM protocol will return.
GetDelegationTokenResponse getDTResponse=recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
org.apache.hadoop.yarn.api.records.Token rmDTToken=recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class);
rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
rmDTToken.setKind("Testclusterkind");
rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
rmDTToken.setService("0.0.0.0:8032");
getDTResponse.setRMDelegationToken(rmDTToken);
final ApplicationClientProtocol cRMProtocol=mock(ApplicationClientProtocol.class);
when(cRMProtocol.getDelegationToken(any(GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
// Inject the mocked RM protocol by overriding serviceStart, so no real
// ResourceManager connection is attempted.
ResourceMgrDelegate rmgrDelegate=new ResourceMgrDelegate(new YarnConfiguration(conf)){
@Override protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl)this.client).setRMClient(cRMProtocol);
}
}
;
yrunner.setResourceMgrDelegate(rmgrDelegate);
Token t=cluster.getDelegationToken(new Text(" "));
// The returned token must carry the kind set on the canned RM token.
assertTrue("Token kind is instead " + t.getKind().toString(),"Testclusterkind".equals(t.getKind().toString()));
}
  finally {
if (cluster != null) {
cluster.close();
}
}
}
InternalCallVerifier BooleanVerifier
@Test public void testDelete() throws Exception {
  // FSCheckpointService.delete must issue a non-recursive delete of the
  // checkpoint's path and report the file system's result.
  FileSystem mockFs = mock(FileSystem.class);
  Path checkpointPath = new Path("/chk/chk0");
  when(mockFs.delete(eq(checkpointPath), eq(false))).thenReturn(true);
  FSCheckpointID checkpointId = new FSCheckpointID(checkpointPath);
  // Base dir differs from the checkpoint path: delete must target the
  // path recorded in the checkpoint id, not the service base.
  FSCheckpointService service = new FSCheckpointService(mockFs, new Path("/otherchk"), new SimpleNamingService("chk0"), (short) 1);
  assertTrue(service.delete(checkpointId));
  verify(mockFs).delete(eq(checkpointPath), eq(false));
}
BooleanVerifier
/**
 * Tests {@link DistributedCache#checkURIs(URI[], URI[])}.
 */
@Test public void testURIs() throws URISyntaxException {
// No cache files or archives at all is valid.
assertTrue(DistributedCache.checkURIs(null,null));
// A URI without a fragment is rejected.
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile.txt")},null));
assertFalse(DistributedCache.checkURIs(null,new URI[]{new URI("file://foo/bar/myCacheArchive.txt")}));
// A fragment on only some of multiple URIs is rejected.
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file"),new URI("file://foo/bar/myCacheFile2.txt")},null));
assertFalse(DistributedCache.checkURIs(null,new URI[]{new URI("file://foo/bar/myCacheArchive1.txt"),new URI("file://foo/bar/myCacheArchive2.txt#archive")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile.txt")},new URI[]{new URI("file://foo/bar/myCacheArchive.txt")}));
// Duplicate fragments within files, within archives, or across the two
// lists are rejected.
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file"),new URI("file://foo/bar/myCacheFile2.txt#file")},null));
assertFalse(DistributedCache.checkURIs(null,new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive"),new URI("file://foo/bar/myCacheArchive2.txt#archive")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile.txt#cache")},new URI[]{new URI("file://foo/bar/myCacheArchive.txt#cache")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#file2")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive"),new URI("file://foo/bar/myCacheArchive2.txt#archive")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file"),new URI("file://foo/bar/myCacheFile2.txt#file")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive1"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#cache")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#cache"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
// Fragment comparison is case-insensitive: #file vs #FILE also clashes.
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file"),new URI("file://foo/bar/myCacheFile2.txt#FILE")},null));
assertFalse(DistributedCache.checkURIs(null,new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive"),new URI("file://foo/bar/myCacheArchive2.txt#ARCHIVE")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile.txt#cache")},new URI[]{new URI("file://foo/bar/myCacheArchive.txt#CACHE")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#file2")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#ARCHIVE"),new URI("file://foo/bar/myCacheArchive2.txt#archive")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#FILE"),new URI("file://foo/bar/myCacheFile2.txt#file")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive1"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
assertFalse(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#CACHE")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#cache"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
// Fragments that are unique across both lists are accepted.
assertTrue(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#file2")},null));
assertTrue(DistributedCache.checkURIs(null,new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive1"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
assertTrue(DistributedCache.checkURIs(new URI[]{new URI("file://foo/bar/myCacheFile1.txt#file1"),new URI("file://foo/bar/myCacheFile2.txt#file2")},new URI[]{new URI("file://foo/bar/myCacheArchive1.txt#archive1"),new URI("file://foo/bar/myCacheArchive2.txt#archive2")}));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testEvents() throws Exception {
  // Replays the canned event stream from getEvents() and checks each
  // event's type and the id recorded in its Avro datum.
  EventReader reader = new EventReader(new DataInputStream(new ByteArrayInputStream(getEvents())));
  // Fix: close the reader even when an assertion fails mid-stream.
  try {
    HistoryEvent e = reader.getNextEvent();
    // Fix: assertEquals instead of assertTrue(x.equals(y)) — on failure it
    // reports both the expected and the actual event type.
    assertEquals(EventType.JOB_PRIORITY_CHANGED, e.getEventType());
    assertEquals("ID", ((JobPriorityChange) e.getDatum()).jobid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.JOB_STATUS_CHANGED, e.getEventType());
    assertEquals("ID", ((JobStatusChanged) e.getDatum()).jobid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.TASK_UPDATED, e.getEventType());
    assertEquals("ID", ((TaskUpdated) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_KILLED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.JOB_KILLED, e.getEventType());
    assertEquals("ID", ((JobUnsuccessfulCompletion) e.getDatum()).jobid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_STARTED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptStarted) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_FINISHED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptFinished) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_KILLED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_KILLED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_STARTED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptStarted) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_FINISHED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptFinished) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_KILLED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
    e = reader.getNextEvent();
    assertEquals(EventType.REDUCE_ATTEMPT_KILLED, e.getEventType());
    assertEquals("task_1_2_r03_4", ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
  } finally {
    reader.close();
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testSigTermedFunctionality() throws IOException {
  // Phase 1: without forced job completion, stop() must drain exactly the
  // queued events and nothing more.
  AppContext mockedContext = Mockito.mock(AppContext.class);
  JHEventHandlerForSigtermTest jheh = new JHEventHandlerForSigtermTest(mockedContext, 0);
  JobId jobId = Mockito.mock(JobId.class);
  jheh.addToFileMap(jobId);
  final int numEvents = 4;
  JobHistoryEvent events[] = new JobHistoryEvent[numEvents];
  for (int i = 0; i < numEvents; ++i) {
    events[i] = getEventToEnqueue(jobId);
    jheh.handle(events[i]);
  }
  jheh.stop();
  // Fix: assertEquals instead of assertTrue(msg, x == n) — the failure
  // message reports both values automatically.
  assertEquals("handleEvent should've been called only 4 times", 4, jheh.eventsHandled);
  // Phase 2: with forced job completion, stop() must append one synthetic
  // event after the 4 queued ones, and it must be a
  // JobUnsuccessfulCompletionEvent.
  jheh = new JHEventHandlerForSigtermTest(mockedContext, 0);
  Job job = Mockito.mock(Job.class);
  Mockito.when(mockedContext.getJob(jobId)).thenReturn(job);
  ApplicationId mockAppId = Mockito.mock(ApplicationId.class);
  // Fix: 'L' suffix instead of lowercase 'l', which reads as the digit 1.
  Mockito.when(mockAppId.getClusterTimestamp()).thenReturn(1000L);
  Mockito.when(jobId.getAppId()).thenReturn(mockAppId);
  jheh.addToFileMap(jobId);
  jheh.setForcejobCompletion(true);
  for (int i = 0; i < numEvents; ++i) {
    events[i] = getEventToEnqueue(jobId);
    jheh.handle(events[i]);
  }
  jheh.stop();
  assertEquals("handleEvent should've been called only 5 times", 5, jheh.eventsHandled);
  assertTrue("Last event handled wasn't JobUnsuccessfulCompletionEvent", jheh.lastEventHandled.getHistoryEvent() instanceof JobUnsuccessfulCompletionEvent);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=50000) public void testDefaultFsIsUsedForHistory() throws Exception {
  // Write a core-site.xml whose fs.defaultFS points at the mini DFS
  // cluster, then run the history handler with a conflicting local default
  // and verify that history files land on the cluster FS, not locally.
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsCluster.getURI().toString());
  // Fix: close the stream even if writeXml throws, so the core-site file
  // handle is not leaked.
  FileOutputStream os = new FileOutputStream(coreSitePath);
  try {
    conf.writeXml(os);
  } finally {
    os.close();
  }
  // In-memory conf now disagrees with the written core-site default.
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters())));
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files", dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system", localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testEscapeJobSummary() {
  // CR/LF characters in the job name must be escaped so the summary
  // stays on a single log line.
  summary.setJobName("aa\rbb\ncc\r\ndd");
  String summaryLine = summary.getJobSummaryString();
  LOG.info("summary: " + summaryLine);
  Assert.assertFalse(summaryLine.contains("\r"));
  Assert.assertFalse(summaryLine.contains("\n"));
  Assert.assertTrue(summaryLine.contains("aa\\rbb\\ncc\\r\\ndd"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests combine-file input splits built from a file system other than the
 * configured default: split paths must keep the local {@code file://}
 * scheme rather than the dummy default FS.
 */
@Test public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  // Fix: close the stream even if writeChars throws.
  FSDataOutputStream dos = lfs.create(localPath);
  try {
    dos.writeChars("Local file for CFIF");
  } finally {
    dos.close();
  }
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  // Fix: typed list instead of the raw List from the garbled source.
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit) s;
    for (Path p : cfs.getPaths()) {
      assertEquals(p.toUri().getScheme(), "file");
    }
  }
}
InternalCallVerifier BooleanVerifier
// Even when every input file is empty, CombineFileRecordReader must report
// progress: once on initialize, then once per empty file as nextKeyValue
// advances through the three (empty) children — three progress() calls total.
@SuppressWarnings("unchecked") @Test public void testProgressIsReportedIfInputASeriesOfEmptyFiles() throws IOException, InterruptedException {
JobConf conf=new JobConf();
Path[] paths=new Path[3];
File[] files=new File[3];
long[] fileLength=new long[3];
try {
// Create three empty files (flush with no writes leaves length 0;
// fileLength[i]=i is only the metadata handed to the split).
for (int i=0; i < 3; i++) {
File dir=new File(outDir.toString());
dir.mkdir();
files[i]=new File(dir,"testfile" + i);
FileWriter fileWriter=new FileWriter(files[i]);
fileWriter.flush();
fileWriter.close();
fileLength[i]=i;
paths[i]=new Path(outDir + "/testfile" + i);
}
CombineFileSplit combineFileSplit=new CombineFileSplit(paths,fileLength);
TaskAttemptID taskAttemptID=Mockito.mock(TaskAttemptID.class);
TaskReporter reporter=Mockito.mock(TaskReporter.class);
TaskAttemptContextImpl taskAttemptContext=new TaskAttemptContextImpl(conf,taskAttemptID,reporter);
CombineFileRecordReader cfrr=new CombineFileRecordReader(combineFileSplit,taskAttemptContext,TextRecordReaderWrapper.class);
cfrr.initialize(combineFileSplit,taskAttemptContext);
// initialize() must have reported progress once already.
verify(reporter).progress();
// No records exist, but scanning the empty children must keep reporting.
Assert.assertFalse(cfrr.nextKeyValue());
verify(reporter,times(3)).progress();
}
  finally {
// Always clean up the temp directory, even on assertion failure.
FileUtil.fullyDelete(new File(outDir.toString()));
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// CombineSequenceFileInputFormat must combine all generated sequence files
// into a single CombineFileSplit, and its record reader must visit every
// key exactly once (tracked via the BitSet).
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException {
Job job=Job.getInstance(conf);
Random random=new Random();
long seed=random.nextLong();
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random,job);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
InputFormat format=new CombineSequenceFileInputFormat();
for (int i=0; i < 3; i++) {
// numSplits is only logged — it is never applied to the format.
// NOTE(review): confirm whether a setMaxSplitSize call was intended here.
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
// All files must be combined into exactly one split.
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
BitSet bits=new BitSet(length);
RecordReader reader=format.createRecordReader(split,context);
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
try {
while (reader.nextKeyValue()) {
IntWritable key=reader.getCurrentKey();
BytesWritable value=reader.getCurrentValue();
assertNotNull("Value should not be null.",value);
final int k=key.get();
LOG.debug("read " + k);
// Each key may appear in at most one partition...
assertFalse("Key in multiple partitions.",bits.get(k));
bits.set(k);
}
}
  finally {
reader.close();
}
// ...and every key must appear in some partition.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// CombineTextInputFormat must combine all generated text files into a single
// CombineFileSplit, and its record reader must visit every line's value
// exactly once (tracked via the BitSet).
@Test(timeout=10000) public void testFormat() throws Exception {
Job job=Job.getInstance(new Configuration(defaultConf));
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
for (int i=0; i < 3; i++) {
// numSplits is only logged — it is never applied to the format.
// NOTE(review): confirm whether a setMaxSplitSize call was intended here.
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
// All files must be combined into exactly one split.
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader reader=format.createRecordReader(split,context);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
try {
int count=0;
while (reader.nextKeyValue()) {
LongWritable key=reader.getCurrentKey();
assertNotNull("Key should not be null.",key);
Text value=reader.getCurrentValue();
// Each line's value is its integer payload; record it in the BitSet.
final int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
// Each value may appear in at most one partition...
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("split=" + split + " count="+ count);
}
  finally {
reader.close();
}
// ...and every value must appear in some partition.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf=new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
configureTestErrorOnNonExistantDir(conf,localFs);
Job job=Job.getInstance(conf);
FileInputFormat,?> fif=new TextInputFormat();
try {
fif.listStatus(job);
Assert.fail("Expecting an IOException for a missing Input path");
}
catch ( IOException e) {
Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2");
expectedExceptionPath=localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage());
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSplitLocationInfo() throws Exception {
  // The split must expose both replica hosts and distinguish the
  // in-memory replica (localhost) from the on-disk-only one (otherhost).
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat fileInputFormat = new TextInputFormat();
  // Fix: typed list instead of the raw List from the garbled source.
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  String[] locations = splits.get(0).getLocations();
  Assert.assertEquals(2, locations.length);
  SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
  Assert.assertEquals(2, locationInfo.length);
  // getLocations() order is not asserted, so select entries by host name.
  SplitLocationInfo localhostInfo = locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1];
  SplitLocationInfo otherhostInfo = locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1];
  Assert.assertTrue(localhostInfo.isOnDisk());
  Assert.assertTrue(localhostInfo.isInMemory());
  Assert.assertTrue(otherhostInfo.isOnDisk());
  Assert.assertFalse(otherhostInfo.isInMemory());
}
BooleanVerifier
/**
 * A record length of zero is invalid: initializing a reader for every
 * split must fail with an IOException.
 */
@Test(timeout=5000) public void testZeroRecordLength() throws Exception {
  localFs.delete(workDir, true);
  Path file = new Path(workDir, "testFormat.txt");
  createFile(file, null, 10, 10);
  Job job = Job.getInstance(defaultConf);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 0);
  FileInputFormat.setInputPaths(job, workDir);
  boolean sawIOException = false;
  for (InputSplit split : format.getSplits(job)) {
    try {
      TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
      RecordReader reader = format.createRecordReader(split, context);
      MapContext mcontext = new MapContextImpl(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
      reader.initialize(split, mcontext);
    } catch (IOException ioe) {
      sawIOException = true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for zero record length:", sawIOException);
}
BooleanVerifier
/**
 * A negative record length is invalid: initializing a reader for every
 * split must fail with an IOException.
 */
@Test(timeout=5000) public void testNegativeRecordLength() throws Exception {
  localFs.delete(workDir, true);
  Path file = new Path(workDir, "testFormat.txt");
  createFile(file, null, 10, 10);
  Job job = Job.getInstance(defaultConf);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), -10);
  FileInputFormat.setInputPaths(job, workDir);
  boolean sawIOException = false;
  for (InputSplit split : format.getSplits(job)) {
    try {
      TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
      RecordReader reader = format.createRecordReader(split, context);
      MapContext mcontext = new MapContextImpl(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
      reader.initialize(split, mcontext);
    } catch (IOException ioe) {
      sawIOException = true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for negative record length:", sawIOException);
}
BooleanVerifier
/**
 * Leaving the record length unset is invalid: initializing a reader for
 * every split must fail with an IOException.
 */
@Test(timeout=5000) public void testNoRecordLength() throws Exception {
  localFs.delete(workDir, true);
  Path file = new Path(workDir, "testFormat.txt");
  createFile(file, null, 10, 10);
  Job job = Job.getInstance(defaultConf);
  FileInputFormat.setInputPaths(job, workDir);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  boolean sawIOException = false;
  for (InputSplit split : format.getSplits(job)) {
    try {
      TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
      RecordReader reader = format.createRecordReader(split, context);
      MapContext mcontext = new MapContextImpl(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
      reader.initialize(split, mcontext);
    } catch (IOException ioe) {
      sawIOException = true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for not setting record length:", sawIOException);
}
BooleanVerifier NullVerifier HybridVerifier
@Test public void testStripBOM() throws IOException {
  // LineRecordReader must strip a leading UTF-8 BOM: if the first record's
  // value still starts with the BOM, the test fails.
  String UTF8_BOM = "\uFEFF";
  URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
  assertNotNull("Cannot find testBOM.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  long testFileSize = testFile.length();
  Configuration conf = new Configuration();
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  // One split covering the whole file, no preferred hosts.
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (String[]) null);
  LineRecordReader reader = new LineRecordReader();
  reader.initialize(split, context);
  int numRecords = 0;
  boolean firstLine = true;
  boolean skipBOM = true;
  // Fix: close the reader even if nextKeyValue throws, to avoid leaking
  // the underlying input stream.
  try {
    while (reader.nextKeyValue()) {
      if (firstLine) {
        firstLine = false;
        if (reader.getCurrentValue().toString().startsWith(UTF8_BOM)) {
          skipBOM = false;
        }
      }
      ++numRecords;
    }
  } finally {
    reader.close();
  }
  assertTrue("BOM is not skipped", skipBOM);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that KeyValueTextInputFormat can split bzip2-compressed input
 * and that every "2i\ti" record is read from exactly one split.
 *
 * Fixes from review: the unused, syntax-broken "Class>" declaration was
 * removed; raw RecordReader/List/MapContext types were parameterized; the
 * ClassNotFoundException is preserved as the IOException's cause.
 */
@Test public void testSplitableCodecs() throws Exception {
  final Job job=Job.getInstance(defaultConf);
  final Configuration conf=job.getConfiguration();
  CompressionCodec codec=null;
  try {
    codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf);
  }
  catch (ClassNotFoundException cnfe) {
    // Keep the original failure as the cause instead of dropping it.
    throw new IOException("Illegal codec!",cnfe);
  }
  Path file=new Path(workDir,"test" + codec.getDefaultExtension());
  // Log the seed so a failing run can be reproduced.
  int seed=new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random=new Random(seed);
  localFs.delete(workDir,true);
  FileInputFormat.setInputPaths(job,workDir);
  final int MAX_LENGTH=500000;
  // Small max split size forces many splits, exercising codec splittability.
  FileInputFormat.setMaxInputSplitSize(job,MAX_LENGTH / 20);
  for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) {
    LOG.info("creating; entries = " + length);
    // Each record is "2i<TAB>i" so key/value consistency can be checked on read.
    Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
    try {
      for (int i=0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    }
    finally {
      writer.close();
    }
    KeyValueTextInputFormat format=new KeyValueTextInputFormat();
    assertTrue("KVTIF claims not splittable",format.isSplitable(job,file));
    for (int i=0; i < 3; i++) {
      int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1;
      LOG.info("splitting: requesting = " + numSplits);
      List<InputSplit> splits=format.getSplits(job);
      LOG.info("splitting: got = " + splits.size());
      // Tracks which values were read; each must appear in exactly one split.
      BitSet bits=new BitSet(length);
      for (int j=0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= "+ splits.get(j));
        TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text,Text> reader=format.createRecordReader(splits.get(j),context);
        MapContext<Text,Text,Text,Text> mcontext=new MapContextImpl<Text,Text,Text,Text>(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j));
        reader.initialize(splits.get(j),mcontext);
        Text key=null;
        Text value=null;
        try {
          int count=0;
          while (reader.nextKeyValue()) {
            key=reader.getCurrentKey();
            value=reader.getCurrentValue();
            final int k=Integer.parseInt(key.toString());
            final int v=Integer.parseInt(value.toString());
            assertEquals("Bad key",0,k % 2);
            assertEquals("Mismatched key/value",k / 2,v);
            LOG.debug("read " + k + ","+ v);
            assertFalse(k + "," + v+ " in multiple partitions.",bits.get(v));
            bits.set(v);
            count++;
          }
          if (count > 0) {
            LOG.info("splits[" + j + "]="+ splits.get(j)+ " count="+ count);
          }
          else {
            LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count);
          }
        }
        finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.",length,bits.cardinality());
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end check of KeyValueTextInputFormat on uncompressed text: the
 * reader class, key/value runtime types, and record partitioning are all
 * verified for many input sizes and repeated split rounds.
 *
 * Fix from review: "Class>" (a syntax error from lost generics) is
 * restored to {@code Class<?>}, and raw reader/list types are parameterized.
 */
@Test public void testFormat() throws Exception {
  Job job=Job.getInstance(new Configuration(defaultConf));
  Path file=new Path(workDir,"test.txt");
  // Log the seed so a failing run can be reproduced.
  int seed=new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random=new Random(seed);
  localFs.delete(workDir,true);
  FileInputFormat.setInputPaths(job,workDir);
  final int MAX_LENGTH=10000;
  for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) {
    LOG.debug("creating; entries = " + length);
    // Each record is "2i<TAB>i" so the key/value split can be verified.
    Writer writer=new OutputStreamWriter(localFs.create(file));
    try {
      for (int i=0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    }
    finally {
      writer.close();
    }
    KeyValueTextInputFormat format=new KeyValueTextInputFormat();
    for (int i=0; i < 3; i++) {
      int numSplits=random.nextInt(MAX_LENGTH / 20) + 1;
      LOG.debug("splitting: requesting = " + numSplits);
      List<InputSplit> splits=format.getSplits(job);
      LOG.debug("splitting: got = " + splits.size());
      // Tracks which values were read; each must appear in exactly one split.
      BitSet bits=new BitSet(length);
      for (int j=0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= "+ splits.get(j));
        TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text,Text> reader=format.createRecordReader(splits.get(j),context);
        Class<?> clazz=reader.getClass();
        assertEquals("reader class is KeyValueLineRecordReader.",KeyValueLineRecordReader.class,clazz);
        MapContext<Text,Text,Text,Text> mcontext=new MapContextImpl<Text,Text,Text,Text>(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j));
        reader.initialize(splits.get(j),mcontext);
        Text key=null;
        Text value=null;
        try {
          int count=0;
          while (reader.nextKeyValue()) {
            key=reader.getCurrentKey();
            clazz=key.getClass();
            assertEquals("Key class is Text.",Text.class,clazz);
            value=reader.getCurrentValue();
            clazz=value.getClass();
            assertEquals("Value class is Text.",Text.class,clazz);
            final int k=Integer.parseInt(key.toString());
            final int v=Integer.parseInt(value.toString());
            assertEquals("Bad key",0,k % 2);
            assertEquals("Mismatched key/value",k / 2,v);
            LOG.debug("read " + v);
            assertFalse("Key in multiple partitions.",bits.get(v));
            bits.set(v);
            count++;
          }
          LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count);
        }
        finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.",length,bits.cardinality());
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a job with two inputs wired through MultipleInputs (plain text and
 * key/value text) and verifies each key was counted once per input.
 *
 * Fixes from review: the input/output streams are now closed on all paths,
 * and assertEquals replaces assertTrue(x.equals(...)), which would throw
 * NullPointerException instead of a useful assertion on a short file.
 *
 * @throws IOException if filesystem setup or output reading fails
 */
@Test public void testDoMultipleInputs() throws IOException {
  Path in1Dir=getDir(IN1_DIR);
  Path in2Dir=getDir(IN2_DIR);
  Path outDir=getDir(OUT_DIR);
  Configuration conf=createJobConf();
  FileSystem fs=FileSystem.get(conf);
  fs.delete(outDir,true);
  DataOutputStream file1=fs.create(new Path(in1Dir,"part-0"));
  try {
    file1.writeBytes("a\nb\nc\nd\ne");
  }
  finally {
    file1.close();
  }
  DataOutputStream file2=fs.create(new Path(in2Dir,"part-0"));
  try {
    file2.writeBytes("a\tblah\nb\tblah\nc\tblah\nd\tblah\ne\tblah");
  }
  finally {
    file2.close();
  }
  Job job=Job.getInstance(conf);
  job.setJobName("mi");
  // Each input directory gets its own InputFormat and Mapper.
  MultipleInputs.addInputPath(job,in1Dir,TextInputFormat.class,MapClass.class);
  MultipleInputs.addInputPath(job,in2Dir,KeyValueTextInputFormat.class,KeyValueMapClass.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Text.class);
  job.setReducerClass(ReducerClass.class);
  FileOutputFormat.setOutputPath(job,outDir);
  boolean success=false;
  try {
    success=job.waitForCompletion(true);
  }
  catch (InterruptedException ie) {
    throw new RuntimeException(ie);
  }
  catch (ClassNotFoundException cnfe) {
    throw new RuntimeException(cnfe);
  }
  if (!success) throw new RuntimeException("Job failed!");
  // The reducer emits "<key> <count>"; every key occurs once in each input.
  BufferedReader output=new BufferedReader(new InputStreamReader(fs.open(new Path(outDir,"part-r-00000"))));
  try {
    assertEquals("a 2",output.readLine());
    assertEquals("b 2",output.readLine());
    assertEquals("c 2",output.readLine());
    assertEquals("d 2",output.readLine());
    assertEquals("e 2",output.readLine());
  }
  finally {
    output.close();
  }
}
InternalCallVerifier BooleanVerifier
/** Adding a dependency to a job already in SUCCESS state must be rejected. */
@Test public void testAddingDependingJobToCompletedJobFails() throws Exception {
  Configuration conf=new Configuration();
  ControlledJob completed=new ControlledJob(conf);
  completed.setJobState(ControlledJob.State.SUCCESS);
  ControlledJob dependency=new ControlledJob(conf);
  assertFalse(completed.addDependingJob(dependency));
}
InternalCallVerifier BooleanVerifier
/** Adding a dependency to a job already in RUNNING state must be rejected. */
@Test public void testAddingDependingJobToRunningJobFails() throws Exception {
  Configuration conf=new Configuration();
  ControlledJob running=new ControlledJob(conf);
  running.setJobState(ControlledJob.State.RUNNING);
  ControlledJob dependency=new ControlledJob(conf);
  assertFalse(running.addDependingJob(dependency));
}
BooleanVerifier
/**
 * A cycle in the job dependency graph must be rejected: JobControl.run()
 * is expected to raise an IllegalArgumentException.
 *
 * Fix from review: the original caught Exception and asserted its type,
 * but passed silently if run() threw nothing at all; fail() now enforces
 * that the exception actually occurs.
 */
@Test public void testCircularDependency() throws IOException {
  ControlledJob job1=new ControlledJob(new Configuration());
  job1.setJobName("job1");
  ControlledJob job2=new ControlledJob(new Configuration());
  job2.setJobName("job2");
  ControlledJob job3=new ControlledJob(new Configuration());
  job3.setJobName("job3");
  // job1 -> job2 -> job3 -> job1 forms the cycle.
  job1.addDependingJob(job2);
  job2.addDependingJob(job3);
  job3.addDependingJob(job1);
  JobControl jobControl=new JobControl("test");
  jobControl.addJob(job1);
  jobControl.addJob(job2);
  jobControl.addJob(job3);
  try {
    jobControl.run();
    fail("Expected an IllegalArgumentException for the circular dependency");
  }
  catch (IllegalArgumentException expected) {
    // expected: cycle detected
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Four jobs (two independent roots, a join depending on both, and a tail
 * depending on the join) must all end in SUCCESS state.
 *
 * Fix from review: assertEquals replaces assertTrue(a == b) so a failure
 * reports the actual state instead of a bare "expected true".
 */
@Test public void testSuccessfulJobs() throws Exception {
  JobControl jobControl=new JobControl("Test");
  ControlledJob job1=createSuccessfulControlledJob(jobControl);
  ControlledJob job2=createSuccessfulControlledJob(jobControl);
  // job3 depends on both roots; job4 depends on job3.
  ControlledJob job3=createSuccessfulControlledJob(jobControl,job1,job2);
  ControlledJob job4=createSuccessfulControlledJob(jobControl,job3);
  runJobControl(jobControl);
  assertEquals("Success list",4,jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list",0,jobControl.getFailedJobList().size());
  assertEquals(ControlledJob.State.SUCCESS,job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS,job2.getJobState());
  assertEquals(ControlledJob.State.SUCCESS,job3.getJobState());
  assertEquals(ControlledJob.State.SUCCESS,job4.getJobState());
  jobControl.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A job whose submit() throws an Error (not just an Exception) must land
 * on the failed list in state FAILED rather than wedging JobControl.
 *
 * Fix from review: assertEquals replaces assertTrue(a == b) for an
 * informative failure message.
 */
@Test public void testErrorWhileSubmitting() throws Exception {
  JobControl jobControl=new JobControl("Test");
  Job mockJob=mock(Job.class);
  ControlledJob job1=new ControlledJob(mockJob,null);
  when(mockJob.getConfiguration()).thenReturn(new Configuration());
  // An Error exercises the broadest failure-handling path in JobControl.
  doThrow(new IncompatibleClassChangeError("This is a test")).when(mockJob).submit();
  jobControl.addJob(job1);
  runJobControl(jobControl);
  try {
    assertEquals("Success list",0,jobControl.getSuccessfulJobList().size());
    assertEquals("Failed list",1,jobControl.getFailedJobList().size());
    assertEquals(ControlledJob.State.FAILED,job1.getJobState());
  }
  finally {
    jobControl.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * One failing root job must end FAILED, its transitive dependents must be
 * DEPENDENT_FAILED, and the independent job must still succeed.
 *
 * Fix from review: assertEquals replaces assertTrue(a == b) for an
 * informative failure message.
 */
@Test public void testFailedJob() throws Exception {
  JobControl jobControl=new JobControl("Test");
  ControlledJob job1=createFailedControlledJob(jobControl);
  ControlledJob job2=createSuccessfulControlledJob(jobControl);
  // job3 depends on the failing job1 (and job2); job4 depends on job3.
  ControlledJob job3=createSuccessfulControlledJob(jobControl,job1,job2);
  ControlledJob job4=createSuccessfulControlledJob(jobControl,job3);
  runJobControl(jobControl);
  assertEquals("Success list",1,jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list",3,jobControl.getFailedJobList().size());
  assertEquals(ControlledJob.State.FAILED,job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS,job2.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED,job3.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED,job4.getJobState());
  jobControl.stop();
}
BooleanVerifier
/**
 * Verifies RehashPartitioner spreads a regular arithmetic key sequence
 * (0, STEP, 2*STEP, ... below END) roughly evenly across PARTITIONS
 * buckets: the fraction of buckets whose deviation from the min/max
 * midpoint exceeds MAX_ERROR must stay below MAX_BADBUCKETS.
 *
 * Fix from review: Java-style array declaration replaces the C-style
 * "int results[]".
 */
@Test public void testPatterns(){
  int[] results=new int[PARTITIONS];
  RehashPartitioner p=new RehashPartitioner();
  // Feed the arithmetic progression through the partitioner.
  for (int i=0; i < END; i+=STEP) {
    results[p.getPartition(new IntWritable(i),null,PARTITIONS)]++;
  }
  int badbuckets=0;
  Integer min=Collections.min(Arrays.asList(ArrayUtils.toObject(results)));
  Integer max=Collections.max(Arrays.asList(ArrayUtils.toObject(results)));
  // Midpoint of min and max serves as the expected per-bucket count.
  Integer avg=(int)Math.round((max + min) / 2.0);
  System.out.println("Dumping buckets distribution: min=" + min + " avg="+ avg+ " max="+ max);
  for (int i=0; i < PARTITIONS; i++) {
    double var=(results[i] - avg) / (double)(avg);
    System.out.println("bucket " + i + " "+ results[i]+ " items, variance "+ var);
    if (Math.abs(var) > MAX_ERROR) badbuckets++;
  }
  System.out.println(badbuckets + " of " + PARTITIONS+ " are too small or large buckets");
  assertTrue("too many overflow buckets",badbuckets < PARTITIONS * MAX_BADBUCKETS);
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Full lifecycle test for JobHistoryServer delegation tokens: obtain a
 * token, use it, renew it past its initial expiry, let it expire, then
 * re-obtain and cancel (including a token naming a different renewer).
 * Kerberos authentication is simulated via UserGroupInformation config;
 * short token intervals keep the wall-clock waits manageable.
 */
@Test public void testDelegationToken() throws IOException, InterruptedException {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
final YarnConfiguration conf=new YarnConfiguration(new JobConf());
// A principal plus kerberos auth are required for token issuance.
conf.set(JHAdminConfig.MR_HISTORY_PRINCIPAL,"RandomOrc/localhost@apache.org");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
// Short timings so renewal and expiry are observable within the test.
final long initialInterval=10000l;
final long maxLifetime=20000l;
final long renewInterval=10000l;
JobHistoryServer jobHistoryServer=null;
MRClientProtocol clientUsingDT=null;
long tokenFetchTime;
try {
// Embedded history server with secure login and web-app startup stubbed out,
// and a secret manager using the short test intervals above.
jobHistoryServer=new JobHistoryServer(){
protected void doSecureLogin( Configuration conf) throws IOException {
}
@Override protected JHSDelegationTokenSecretManager createJHSSecretManager( Configuration conf, HistoryServerStateStoreService store){
return new JHSDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval,3600000,store);
}
@Override protected HistoryClientService createHistoryClientService(){
return new HistoryClientService(historyContext,this.jhsDTSecretManager){
@Override protected void initializeWebApp( Configuration conf){
}
}
;
}
}
;
jobHistoryServer.init(conf);
jobHistoryServer.start();
final MRClientProtocol hsService=jobHistoryServer.getClientService().getClientHandler();
UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG");
Assert.assertEquals("testrenewer",loggedInUser.getShortUserName());
loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
Token token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName());
tokenFetchTime=System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// Use the token from a different caller identity; an "Unknown job" error
// proves the call authenticated and reached the job lookup.
clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"TheDarkLord",conf);
GetJobReportRequest jobReportRequest=Records.newRecord(GetJobReportRequest.class);
jobReportRequest.setJobId(MRBuilderUtils.newJobId(123456,1,1));
try {
clientUsingDT.getJobReport(jobReportRequest);
}
catch ( IOException e) {
Assert.assertEquals("Unknown job job_123456_0001",e.getMessage());
}
// Wait to roughly the middle of the initial validity window, then renew.
while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) {
Thread.sleep(500l);
}
long nextExpTime=renewDelegationToken(loggedInUser,hsService,token);
long renewalTime=System.currentTimeMillis();
LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime);
// Cross the original expiry time; the renewed token must still work.
while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) {
Thread.sleep(500l);
}
Thread.sleep(50l);
try {
clientUsingDT.getJobReport(jobReportRequest);
}
catch ( IOException e) {
Assert.assertEquals("Unknown job job_123456_0001",e.getMessage());
}
// Let the renewed interval elapse so the token actually expires.
while (System.currentTimeMillis() < renewalTime + renewInterval) {
Thread.sleep(500l);
}
Thread.sleep(50l);
LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid");
try {
clientUsingDT.getJobReport(jobReportRequest);
fail("Should not have succeeded with an expired token");
}
catch ( IOException e) {
// Expired tokens surface as an IOException whose cause mentions expiry.
assertTrue(e.getCause().getMessage().contains("is expired"));
}
if (clientUsingDT != null) {
clientUsingDT=null;
}
// Fresh token: verify normal use, then cancellation semantics.
token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName());
tokenFetchTime=System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf);
try {
clientUsingDT.getJobReport(jobReportRequest);
}
catch ( IOException e) {
fail("Unexpected exception" + e);
}
cancelDelegationToken(loggedInUser,hsService,token);
// A token naming a different renewer can still be cancelled by its owner.
Token tokenWithDifferentRenewer=getDelegationToken(loggedInUser,hsService,"yarn");
cancelDelegationToken(loggedInUser,hsService,tokenWithDifferentRenewer);
if (clientUsingDT != null) {
clientUsingDT=null;
}
// A new connection made with the cancelled token must be rejected.
clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf);
LOG.info("Cancelled delegation token at: " + System.currentTimeMillis());
try {
clientUsingDT.getJobReport(jobReportRequest);
fail("Should not have succeeded with a cancelled delegation token");
}
catch ( IOException e) {
// expected: RPC with a cancelled token fails
}
}
finally {
jobHistoryServer.stop();
}
}
InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Verifies TokenCache merges credentials loaded from a binary token file:
 * a token already present in the credentials must not be replaced by the
 * file copy, tokens in the file are reused without re-fetching, and a
 * filesystem absent from the file still gets a fresh token. Repeat calls
 * must be idempotent.
 *
 * Fix from review: "Token>" declarations (a syntax error from lost
 * generics) are restored to {@code Token<?>}, and assertNotNull replaces
 * assertTrue(x != null).
 */
@Test @SuppressWarnings("deprecation") public void testBinaryCredentials() throws Exception {
  Path TEST_ROOT_DIR=new Path(System.getProperty("test.build.data","test/build/data"));
  String binaryTokenFile=FileSystem.getLocal(conf).makeQualified(new Path(TEST_ROOT_DIR,"tokenFile")).toUri().getPath();
  MockFileSystem fs1=createFileSystemForServiceName("service1");
  MockFileSystem fs2=createFileSystemForServiceName("service2");
  MockFileSystem fs3=createFileSystemForServiceName("service3");
  // Write tokens for service1 and service2 into the binary token file.
  Credentials creds=new Credentials();
  Token<?> token1=fs1.getDelegationToken(renewer);
  Token<?> token2=fs2.getDelegationToken(renewer);
  creds.addToken(token1.getService(),token1);
  creds.addToken(token2.getService(),token2);
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY,binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile),conf);
  creds=new Credentials();
  // A token already held must win over the (older) copy in the file.
  Token<?> newerToken1=fs1.getDelegationToken(renewer);
  assertNotSame(newerToken1,token1);
  creds.addToken(newerToken1.getService(),newerToken1);
  checkToken(creds,newerToken1);
  TokenCache.obtainTokensForNamenodesInternal(fs1,creds,conf);
  checkToken(creds,newerToken1,token2);
  TokenCache.obtainTokensForNamenodesInternal(fs2,creds,conf);
  checkToken(creds,newerToken1,token2);
  // service3 is absent from the binary file, so a fresh token is fetched.
  TokenCache.obtainTokensForNamenodesInternal(fs3,creds,conf);
  Token<?> token3=creds.getToken(new Text(fs3.getCanonicalServiceName()));
  assertNotNull(token3);
  checkToken(creds,newerToken1,token2,token3);
  // Repeat calls must not change the credential set.
  TokenCache.obtainTokensForNamenodesInternal(fs1,creds,conf);
  TokenCache.obtainTokensForNamenodesInternal(fs2,creds,conf);
  TokenCache.obtainTokensForNamenodesInternal(fs3,creds,conf);
  checkToken(creds,newerToken1,token2,token3);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Delegation-token ownership test against the JobClient API: the issuing
 * user (user1) can renew and cancel the token; another user (user2) must
 * be denied both operations with AccessControlException; and a second
 * cancel by the owner must fail with InvalidToken.
 */
@SuppressWarnings("deprecation") @Test public void testDelegationToken() throws Exception {
final JobClient client;
client=user1.doAs(new PrivilegedExceptionAction(){
@Override public JobClient run() throws Exception {
return new JobClient(cluster.createJobConf());
}
}
);
final JobClient bobClient;
bobClient=user2.doAs(new PrivilegedExceptionAction(){
@Override public JobClient run() throws Exception {
return new JobClient(cluster.createJobConf());
}
}
);
// Token issued to user1; decode the identifier to inspect its fields.
final Token token=client.getDelegationToken(new Text(user1.getUserName()));
DataInputBuffer inBuf=new DataInputBuffer();
byte[] bytes=token.getIdentifier();
inBuf.reset(bytes,bytes.length);
DelegationTokenIdentifier ident=new DelegationTokenIdentifier();
ident.readFields(inBuf);
// NOTE(review): assumes user1's name is "alice" -- set up outside this view.
assertEquals("alice",ident.getUser().getUserName());
long createTime=ident.getIssueDate();
long maxTime=ident.getMaxDate();
long currentTime=System.currentTimeMillis();
System.out.println("create time: " + createTime);
System.out.println("current time: " + currentTime);
System.out.println("max time: " + maxTime);
// Sanity-check that the token's validity window brackets "now".
assertTrue("createTime < current",createTime < currentTime);
assertTrue("current < maxTime",currentTime < maxTime);
// The owner may renew (twice, to show renewal is repeatable).
user1.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
client.renewDelegationToken(token);
client.renewDelegationToken(token);
return null;
}
}
);
// A different user must not be able to renew ...
user2.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
bobClient.renewDelegationToken(token);
Assert.fail("bob renew");
}
catch ( AccessControlException ace) {
// expected: bob is not the designated renewer
}
return null;
}
}
);
// ... nor cancel the token.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
bobClient.cancelDelegationToken(token);
Assert.fail("bob cancel");
}
catch ( AccessControlException ace) {
// expected: bob may not cancel alice's token
}
return null;
}
}
);
// The owner cancels once; a second cancel must raise InvalidToken.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
client.cancelDelegationToken(token);
try {
client.cancelDelegationToken(token);
Assert.fail("second alice cancel");
}
catch ( InvalidToken it) {
// expected: token already cancelled
}
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Fetcher shutdown while an on-disk map output is being written must
 * close the input stream, delete the temporary on-disk file, and abort
 * the OnDiskMapOutput. The StuckInputStream keeps the fetcher blocked
 * mid-read until shutDown() triggers disconnect().
 */
@Test(timeout=10000) public void testInterruptOnDisk() throws Exception {
final int FETCHER=7;
Path p=new Path("file:///tmp/foo");
Path pTmp=OnDiskMapOutput.getTempPath(p,FETCHER);
FileSystem mFs=mock(FileSystem.class,RETURNS_DEEP_STUBS);
MapOutputFile mof=mock(MapOutputFile.class);
when(mof.getInputFileForWrite(any(TaskID.class),anyLong())).thenReturn(p);
OnDiskMapOutput odmo=spy(new OnDiskMapOutput(map1ID,id,mm,100L,job,mof,FETCHER,true,mFs,p));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(odmo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Valid shuffle handshake so the fetcher proceeds to read map output.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// Stream that hangs after the header, keeping the fetcher blocked in read().
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
// disconnect() (invoked during shutdown) unblocks the stuck stream.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
in.waitForFetcher();
underTest.shutDown();
underTest.join();
assertTrue(in.wasClosedProperly());
// The partially-written temp file must be created, then cleaned up.
verify(mFs).create(eq(pTmp));
verify(mFs).delete(eq(pTmp),eq(false));
verify(odmo).abort();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Fetcher shutdown while an in-memory map output is being filled must
 * close the input stream and abort the InMemoryMapOutput. The
 * StuckInputStream keeps the fetcher blocked until shutDown().
 */
@Test(timeout=10000) public void testInterruptInMemory() throws Exception {
final int FETCHER=2;
InMemoryMapOutput immo=spy(new InMemoryMapOutput(job,id,mm,100,null,true));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(immo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Valid shuffle handshake so the fetcher proceeds to read map output.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// Stream that hangs after the header, keeping the fetcher blocked in read().
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
// disconnect() (invoked during shutdown) unblocks the stuck stream.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
in.waitForFetcher();
underTest.shutDown();
underTest.join();
assertTrue(in.wasClosedProperly());
verify(immo).abort();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drives StubbedMergeManager through two in-memory merge cycles: two
 * committed outputs (2 x 7950 of 10000 bytes) exceed the 90% merge
 * threshold and trigger a merge, while a third reserve during that window
 * is told to wait (returns null). The cycle runs twice; the exception
 * reporter must never fire. Barriers synchronize test and merge thread.
 */
@Test(timeout=10000) public void testMemoryMerge() throws Exception {
final int TOTAL_MEM_BYTES=10000;
final int OUTPUT_SIZE=7950;
JobConf conf=new JobConf();
// All reduce memory for shuffle; 80% single-shuffle cap; merge at 90% usage.
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,1.0f);
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,TOTAL_MEM_BYTES);
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,0.8f);
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT,0.9f);
TestExceptionReporter reporter=new TestExceptionReporter();
// Barriers let the test rendezvous with the stubbed merge thread.
CyclicBarrier mergeStart=new CyclicBarrier(2);
CyclicBarrier mergeComplete=new CyclicBarrier(2);
StubbedMergeManager mgr=new StubbedMergeManager(conf,reporter,mergeStart,mergeComplete);
MapOutput out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
InMemoryMapOutput mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
MapOutput out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
InMemoryMapOutput mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
// Memory is exhausted, so the third reservation must stall (null).
MapOutput out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
mout1.commit();
mout2.commit();
mergeStart.await();
Assert.assertEquals(1,mgr.getNumMerges());
// Second identical cycle while the first merge is still in flight.
out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
mout1.commit();
mout2.commit();
mergeComplete.await();
mergeStart.await();
Assert.assertEquals(2,mgr.getNumMerges());
mergeComplete.await();
// No further merges, and no exceptions reported from the merge thread.
Assert.assertEquals(2,mgr.getNumMerges());
Assert.assertEquals("exception reporter invoked",0,reporter.getNumExceptions());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException {
JobConf jobConf=new JobConf();
final int SORT_FACTOR=5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR);
MapOutputFile mapOutputFile=new MROutputFiles();
FileSystem fs=FileSystem.getLocal(jobConf);
MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile);
MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger");
int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor");
assertEquals(mergeFactor,SORT_FACTOR);
onDiskMerger.suspend();
Random rand=new Random();
for (int i=0; i < 2 * SORT_FACTOR; ++i) {
Path path=new Path("somePath");
CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt());
manager.closeOnDiskFile(cap);
}
LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0);
for (int i=0; i < pendingToBeMerged.size(); ++i) {
List inputs=pendingToBeMerged.get(i);
for (int j=1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize());
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When reduce-side shuffle scheduling marks source tasks as failed,
 * progress must advance proportionally (0.5 after one of two tasks,
 * 1.0 after both) and waitUntilDone must only report completion once
 * every task is resolved.
 */
@SuppressWarnings("rawtypes") @Test public void testTipFailed() throws Exception {
JobConf job=new JobConf();
job.setNumMapTasks(2);
// Minimal reduce-side TaskStatus stub; fetch-failure reporting is a no-op.
TaskStatus status=new TaskStatus(){
@Override public boolean getIsMap(){
return false;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
}
;
Progress progress=new Progress();
TaskAttemptID reduceId=new TaskAttemptID("314159",0,TaskType.REDUCE,0,0);
ShuffleSchedulerImpl scheduler=new ShuffleSchedulerImpl(job,status,reduceId,null,progress,null,null,null);
JobID jobId=new JobID();
TaskID taskId1=new TaskID(jobId,TaskType.REDUCE,1);
scheduler.tipFailed(taskId1);
// One of two tasks resolved: half progress, not yet done.
Assert.assertEquals("Progress should be 0.5",0.5f,progress.getProgress(),0.0f);
Assert.assertFalse(scheduler.waitUntilDone(1));
TaskID taskId0=new TaskID(jobId,TaskType.REDUCE,0);
scheduler.tipFailed(taskId0);
// Both tasks resolved: full progress, scheduler reports done.
Assert.assertEquals("Progress should be 1.0",1.0f,progress.getProgress(),0.0f);
Assert.assertTrue(scheduler.waitUntilDone(1));
}
BooleanVerifier
/**
 * Startup cleaning: pre-create directories under each volume's
 * toBeDeleted area, then construct the MRAsyncDiskService. The service
 * is expected to schedule deletion of that pre-existing content;
 * makeSureCleanedUp verifies everything is gone.
 */
@Test public void testMRAsyncDiskServiceStartupCleaning() throws Throwable {
  FileSystem localFileSystem=FileSystem.getLocal(new Configuration());
  String[] vols=new String[]{TEST_ROOT_DIR + "/0",TEST_ROOT_DIR + "/1"};
  String suffix=Path.SEPARATOR_CHAR + MRAsyncDiskService.TOBEDELETED;
  // Leftover content: "a" under vol0; "b", "b/c" and "d" under vol1.
  File[] leftovers=new File[]{
      new File(vols[0] + suffix,"a"),
      new File(vols[1] + suffix,"b"),
      new File(vols[1] + suffix,"b/c"),
      new File(vols[1] + suffix,"d")};
  for (File dir : leftovers) {
    dir.mkdirs();
  }
  for (File dir : leftovers) {
    assertTrue(dir.exists());
  }
  // Construction triggers the startup cleanup of toBeDeleted contents.
  MRAsyncDiskService service=new MRAsyncDiskService(localFileSystem,vols);
  makeSureCleanedUp(vols,service);
}
BooleanVerifier
/**
 * cleanupAllVolumes must remove every file and directory placed directly
 * inside the volume roots; makeSureCleanedUp then verifies the managed
 * toBeDeleted area is empty as well.
 */
@Test public void testMRAsyncDiskServiceMoveAndDeleteAllVolumes() throws Throwable {
  FileSystem localFileSystem=FileSystem.getLocal(new Configuration());
  String[] vols=new String[]{TEST_ROOT_DIR + "/0",TEST_ROOT_DIR + "/1"};
  MRAsyncDiskService service=new MRAsyncDiskService(localFileSystem,vols);
  // Content to be wiped: "a" under vol0; "b", "b/c" and "d" under vol1.
  File[] contents=new File[]{
      new File(vols[0],"a"),
      new File(vols[1],"b"),
      new File(vols[1],"b/c"),
      new File(vols[1],"d")};
  for (File dir : contents) {
    dir.mkdirs();
  }
  for (File dir : contents) {
    assertTrue(dir.exists());
  }
  service.cleanupAllVolumes();
  for (File dir : contents) {
    assertFalse(dir.exists());
  }
  makeSureCleanedUp(vols,service);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Creates directories inside the volumes and removes them through
 * MRAsyncDiskService: relative-path deletion (including a missing path),
 * absolute-path deletion, and rejection of paths outside all volumes.
 */
@Test public void testMRAsyncDiskService() throws Throwable {
  FileSystem localFileSystem=FileSystem.getLocal(new Configuration());
  String[] vols=new String[]{TEST_ROOT_DIR + "/0",TEST_ROOT_DIR + "/1"};
  MRAsyncDiskService service=new MRAsyncDiskService(localFileSystem,vols);
  File dirA=new File(vols[0],"a");
  File dirB=new File(vols[1],"b");
  File dirC=new File(vols[1],"b/c");
  File dirD=new File(vols[1],"d");
  for (File dir : new File[]{dirA,dirB,dirC,dirD}) {
    dir.mkdirs();
    assertTrue(dir.exists());
  }
  service.moveAndDeleteRelativePath(vols[0],"a");
  assertFalse(dirA.exists());
  // Deleting "b" must take its nested child "b/c" with it.
  service.moveAndDeleteRelativePath(vols[1],"b");
  assertFalse(dirB.exists());
  assertFalse(dirC.exists());
  // A nonexistent relative path reports false rather than throwing.
  assertFalse(service.moveAndDeleteRelativePath(vols[1],"not_exists"));
  // Absolute paths outside every volume must be rejected with IOException.
  IOException caught=null;
  try {
    service.moveAndDeleteAbsolutePath(TEST_ROOT_DIR + "/2");
  }
  catch ( IOException e) {
    caught=e;
  }
  assertNotNull("asyncDiskService should not be able to delete files " + "outside all volumes",caught);
  assertTrue(service.moveAndDeleteAbsolutePath(vols[1] + Path.SEPARATOR_CHAR + "d"));
  makeSureCleanedUp(vols,service);
}
BooleanVerifier EqualityVerifier HybridVerifier
/** The service must construct successfully even when a volume root is read-only. */
@Test public void testToleratesSomeUnwritableVolumes() throws Throwable {
  FileSystem localFs=FileSystem.getLocal(new Configuration());
  String[] volumes=new String[]{TEST_ROOT_DIR + "/0",TEST_ROOT_DIR + "/1"};
  assertTrue(new File(volumes[0]).mkdirs());
  // Make the first volume read-only for the service.
  assertEquals(0,FileUtil.chmod(volumes[0],"400"));
  try {
    new MRAsyncDiskService(localFs,volumes);
  }
  finally {
    // Restore permissions so later tests can clean the directory up.
    FileUtil.chmod(volumes[0],"755");
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * To ensure nothing broke after resource-request normalization was removed
 * from the MR ApplicationMaster side: a sleep job submitted with
 * non-normalized memory capabilities must still complete successfully.
 * @throws Exception on job setup or submission failure
 */
@Test public void testJobWithNonNormalizedCapabilities() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
JobConf jobConf=new JobConf(mrCluster.getConfig());
// NOTE(review): the map setting uses the "mapreduce." key while the reduce
// setting uses the older "mapred." key -- presumably intentional to cover
// the deprecated-key path; confirm before normalizing the key names.
jobConf.setInt("mapreduce.map.memory.mb",700);
jobConf.setInt("mapred.reduce.memory.mb",1500);
SleepJob sleepJob=new SleepJob();
sleepJob.setConf(jobConf);
Job job=sleepJob.createJob(3,2,1000,1,500,1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR);
job.submit();
boolean completed=job.waitForCompletion(true);
Assert.assertTrue("Job should be completed",completed);
Assert.assertEquals("Job should be finished successfully",JobStatus.State.SUCCEEDED,job.getJobState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a small sleep job on the mini cluster and verifies completion,
 * final SUCCEEDED state, the shape of the tracking URL, counters, and
 * reported task progress.
 */
@Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testSleepJob().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final Configuration conf=new Configuration(mrCluster.getConfig());
  conf.set(MRConfig.MASTER_ADDRESS,"local");
  final SleepJob sleeper=new SleepJob();
  sleeper.setConf(conf);
  final int reduces=conf.getInt("TestMRJobs.testSleepJob.reduces",2);
  final Job job=sleeper.createJob(3,reduces,10000,1,5000,1);
  job.addFileToClassPath(APP_JAR);
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1);
  job.submit();
  final String url=job.getTrackingURL();
  final String id=job.getJobID().toString();
  Assert.assertTrue(job.waitForCompletion(true));
  Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState());
  // The tracking URL must end with the job id's numeric suffix plus "/".
  Assert.assertTrue("Tracking URL was " + url + " but didn't Match Job ID "+ id,url.endsWith(id.substring(id.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs the random-text-writer job (3 maps of 1KB each) and verifies success,
 * the tracking URL shape, the number of output part files, and its counters.
 */
@Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testRandomWriter().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final RandomTextWriterJob writerJob=new RandomTextWriterJob();
  // 3072 total bytes / 1024 bytes per map => exactly 3 map tasks.
  mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES,"3072");
  mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP,"1024");
  final Job job=writerJob.createJob(mrCluster.getConfig());
  final Path outDir=new Path(OUTPUT_ROOT_DIR,"random-output");
  FileOutputFormat.setOutputPath(job,outDir);
  job.setSpeculativeExecution(false);
  job.addFileToClassPath(APP_JAR);
  job.setJarByClass(RandomTextWriterJob.class);
  job.setMaxMapAttempts(1);
  job.submit();
  final String url=job.getTrackingURL();
  final String id=job.getJobID().toString();
  Assert.assertTrue(job.waitForCompletion(true));
  Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState());
  Assert.assertTrue("Tracking URL was " + url + " but didn't Match Job ID "+ id,url.endsWith(id.substring(id.lastIndexOf("_")) + "/"));
  // Count output part files, skipping the _SUCCESS marker.
  int partFiles=0;
  RemoteIterator files=FileContext.getFileContext(mrCluster.getConfig()).listStatus(outDir);
  while (files.hasNext()) {
    FileStatus stat=files.next();
    if (!stat.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
      partFiles++;
    }
  }
  Assert.assertEquals("Number of part files is wrong!",3,partFiles);
  verifyRandomWriterCounters(job);
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Runs a 1-map sleep job with small rolling-log limits configured for both
 * the AM (7KB, 7 backups) and the task (4KB, 3 backups), then scans every
 * NodeManager log directory and checks that each container produced the
 * configured number of rolled syslog files, each at least the configured
 * size.
 */
@Test(timeout=120000) public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final SleepJob sleepJob=new SleepJob();
  final JobConf sleepConf=new JobConf(mrCluster.getConfig());
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL,Level.ALL.toString());
  // Cap task logs at 4KB with 3 rolled backups.
  final long userLogKb=4;
  sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT,userLogKb);
  sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS,3);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL,Level.ALL.toString());
  // Cap AM logs at 7KB with 7 rolled backups.
  final long amLogKb=7;
  sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB,amLogKb);
  sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS,7);
  sleepJob.setConf(sleepConf);
  final Job job=sleepJob.createJob(1,0,1L,100,0L,0);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  final JobId jobId=TypeConverter.toYarn(job.getJobID());
  final ApplicationId appID=jobId.getAppId();
  // Poll (up to 60s) for the application to reach a terminal RM state.
  int pollElapsed=0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed+=1000;
    if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED,mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
  final String appIdStr=appID.toString();
  final String appIdSuffix=appIdStr.substring("application_".length(),appIdStr.length());
  final String containerGlob="container_" + appIdSuffix + "_*_*";
  final String syslogGlob=appIdStr + Path.SEPARATOR + containerGlob+ Path.SEPARATOR+ TaskLog.LogName.SYSLOG;
  int numAppMasters=0;
  int numMapTasks=0;
  for (int i=0; i < NUM_NODE_MGRS; i++) {
    final Configuration nmConf=mrCluster.getNodeManager(i).getConfig();
    for ( String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
      final Path absSyslogGlob=new Path(logDir + Path.SEPARATOR + syslogGlob);
      LOG.info("Checking for glob: " + absSyslogGlob);
      final FileStatus[] syslogs=localFs.globStatus(absSyslogGlob);
      for ( FileStatus slog : syslogs) {
        // In uber mode the AM container's log doubles as the task's log.
        boolean foundAppMaster=job.isUber();
        final Path containerPathComponent=slog.getPath().getParent();
        if (!foundAppMaster) {
          // Container #1 of the attempt hosts the AM.
          final ContainerId cid=ConverterUtils.toContainerId(containerPathComponent.getName());
          foundAppMaster=(cid.getId() == 1);
        }
        final FileStatus[] sysSiblings=localFs.globStatus(new Path(containerPathComponent,TaskLog.LogName.SYSLOG + "*"));
        // Sort so sysSiblings[1] is the first rolled backup (syslog.1).
        Arrays.sort(sysSiblings);
        if (foundAppMaster) {
          numAppMasters++;
          // BUG FIX: was Assert.assertSame, which compares autoboxed Integer
          // *references* rather than values; assertEquals does a value check.
          Assert.assertEquals("Unexpected number of AM sylog* files",sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS,0) + 1,sysSiblings.length);
          Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,sysSiblings[1].getLen() >= amLogKb * 1024);
        }
        else {
          numMapTasks++;
          // BUG FIX: assertSame -> assertEquals (value equality, not identity).
          Assert.assertEquals("Unexpected number of MR task sylog* files",sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS,0) + 1,sysSiblings.length);
          Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,sysSiblings[1].getLen() >= userLogKb * 1024);
        }
      }
    }
  }
  Assert.assertEquals("No AppMaster log found!",1,numAppMasters);
  if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false)) {
    Assert.assertEquals("MapTask log with uber found!",0,numMapTasks);
  }
  else {
    Assert.assertEquals("No MapTask log found!",1,numMapTasks);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Submits a succeeding job and checks that the custom committer left setup
 * and commit marker files, but no abort markers, at both job and task level.
 */
@Test public void testJobSucceed() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobSucceed().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final JobConf conf=new JobConf(mrCluster.getConfig());
  final String workDir=mrCluster.getTestWorkDir().getAbsolutePath();
  final Path in=new Path(workDir,"in");
  final Path out=new Path(workDir,"out");
  runJobSucceed(conf,in,out);
  final FileSystem fs=FileSystem.get(conf);
  // Setup and commit markers must exist; abort markers must not.
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
  Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
  Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Submits a failing job and checks that the custom committer left setup and
 * abort marker files, but no commit markers, at both job and task level.
 */
@Test public void testJobFail() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobFail().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final JobConf conf=new JobConf(mrCluster.getConfig());
  final String workDir=mrCluster.getTestWorkDir().getAbsolutePath();
  final Path in=new Path(workDir,"fail-in");
  final Path out=new Path(workDir,"fail-out");
  runJobFail(conf,in,out);
  final FileSystem fs=FileSystem.get(conf);
  // Setup and abort markers must exist; commit markers must not.
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
  Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
  Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
  Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Queries the RMNMInfo JMX bean of the live mini cluster and verifies that
 * the JSON report lists all node managers as RUNNING with zero used
 * containers/memory and all expected fields populated.
 */
@Test public void testRMNMInfo() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final RMContext context=mrCluster.getResourceManager().getRMContext();
  final ResourceScheduler scheduler=mrCluster.getResourceManager().getResourceScheduler();
  final RMNMInfo info=new RMNMInfo(context,scheduler);
  final JsonNode nodes=new ObjectMapper().readTree(info.getLiveNodeManagers());
  Assert.assertEquals("Unexpected number of live nodes:",NUMNODEMANAGERS,nodes.size());
  for ( JsonNode n : nodes) {
    Assert.assertNotNull(n.get("HostName"));
    Assert.assertNotNull(n.get("Rack"));
    Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING"));
    Assert.assertNotNull(n.get("NodeHTTPAddress"));
    Assert.assertNotNull(n.get("LastHealthUpdate"));
    Assert.assertNotNull(n.get("HealthReport"));
    Assert.assertNotNull(n.get("NodeManagerVersion"));
    Assert.assertNotNull(n.get("NumContainers"));
    // Idle cluster: no containers and no memory in use.
    Assert.assertEquals(n.get("NodeId") + ": Unexpected number of used containers",0,n.get("NumContainers").asInt());
    Assert.assertEquals(n.get("NodeId") + ": Unexpected amount of used memory",0,n.get("UsedMemoryMB").asInt());
    Assert.assertNotNull(n.get("AvailableMemoryMB"));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Feeds RMNMInfo a mocked RMContext whose node is unknown to the (mocked)
 * scheduler, and verifies the JSON report still contains the static node
 * fields while the scheduler-derived fields (NumContainers, UsedMemoryMB,
 * AvailableMemoryMB) are absent.
 */
@Test public void testRMNMInfoMissmatch() throws Exception {
  final RMContext context=mock(RMContext.class);
  final ResourceScheduler scheduler=mock(ResourceScheduler.class);
  final ConcurrentMap nodeMap=new ConcurrentHashMap();
  final RMNode node=MockNodes.newNodeInfo(1,MockNodes.newResource(4 * 1024));
  nodeMap.put(node.getNodeID(),node);
  when(context.getRMNodes()).thenReturn(nodeMap);
  final RMNMInfo info=new RMNMInfo(context,scheduler);
  final JsonNode nodes=new ObjectMapper().readTree(info.getLiveNodeManagers());
  Assert.assertEquals("Unexpected number of live nodes:",1,nodes.size());
  for ( JsonNode n : nodes) {
    Assert.assertNotNull(n.get("HostName"));
    Assert.assertNotNull(n.get("Rack"));
    Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING"));
    Assert.assertNotNull(n.get("NodeHTTPAddress"));
    Assert.assertNotNull(n.get("LastHealthUpdate"));
    Assert.assertNotNull(n.get("HealthReport"));
    Assert.assertNotNull(n.get("NodeManagerVersion"));
    // Scheduler knows nothing about this node, so usage fields are omitted.
    Assert.assertNull(n.get("NumContainers"));
    Assert.assertNull(n.get("UsedMemoryMB"));
    Assert.assertNull(n.get("AvailableMemoryMB"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises speculative execution in three configurations: disabled, map
 * speculation only, and reduce speculation only, verifying launched / failed
 * / killed task counters for each run.
 */
@Test public void testSpeculativeExecution() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  // No speculation: exactly the base number of attempts is launched.
  Counters counters=runSpecJobAndVerify(false,false,2,2);
  Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue());
  // Map speculation: one extra map is launched and then killed.
  counters=runSpecJobAndVerify(true,false,3,2);
  Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue());
  Assert.assertEquals(1,counters.findCounter(JobCounter.NUM_KILLED_MAPS).getValue());
  // Reduce speculation: one extra reduce is launched.
  runSpecJobAndVerify(false,true,2,3);
}
/** Runs one speculation job, asserts success and launched-task counts, and returns its counters. */
private Counters runSpecJobAndVerify(boolean mapSpec,boolean redSpec,long expectedMaps,long expectedReduces) throws Exception {
  final Job job=runSpecTest(mapSpec,redSpec);
  Assert.assertTrue(job.waitForCompletion(true));
  Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState());
  final Counters counters=job.getCounters();
  Assert.assertEquals(expectedMaps,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
  Assert.assertEquals(expectedReduces,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
  return counters;
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a job whose mapper always fails (uberized variant) and verifies that
 * only one task attempt exists, the completion event reports the failure,
 * and the job ends in the FAILED state.
 */
@Override @Test public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting uberized testFailingMapper().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Job job=runFailingMapperJob();
  TaskID taskID=new TaskID(job.getJobID(),TaskType.MAP,0);
  TaskAttemptID aId=new TaskAttemptID(taskID,0);
  System.out.println("Diagnostics for " + aId + " :");
  for ( String diag : job.getTaskDiagnostics(aId)) {
    System.out.println(diag);
  }
  // In uber mode the task must not be retried, so attempt #1 should not
  // exist: asking for its diagnostics is expected to throw.
  boolean secondTaskAttemptExists=true;
  try {
    aId=new TaskAttemptID(taskID,1);
    System.out.println("Diagnostics for " + aId + " :");
    for ( String diag : job.getTaskDiagnostics(aId)) {
      System.out.println(diag);
    }
  }
  catch ( Exception e) {
    secondTaskAttemptExists=false;
  }
  // Idiom fix: assertFalse instead of assertEquals(false, ...).
  Assert.assertFalse(secondTaskAttemptExists);
  TaskCompletionEvent[] events=job.getTaskCompletionEvents(0,2);
  Assert.assertEquals(1,events.length);
  TaskCompletionEvent.Status status=events[0].getStatus();
  Assert.assertTrue(status == TaskCompletionEvent.Status.FAILED || status == TaskCompletionEvent.Status.TIPFAILED);
  Assert.assertEquals(JobStatus.State.FAILED,job.getJobState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the equals/compareTo/hashCode/toString contracts of
 * TaskAttemptId: equal ids agree on all four; ordering is by timestamp,
 * job, task type, task number, then attempt number.
 */
@Test public void testTaskAttemptId(){
  // Idiom fix: uppercase 'L' long-literal suffix (lowercase 'l' is easily
  // misread as the digit 1).
  long ts1=1315890136000L;
  long ts2=1315890136001L;
  TaskAttemptId t1=createTaskAttemptId(ts1,2,2,TaskType.MAP,2);
  TaskAttemptId t2=createTaskAttemptId(ts1,2,2,TaskType.REDUCE,2);
  TaskAttemptId t3=createTaskAttemptId(ts1,2,2,TaskType.MAP,3);
  TaskAttemptId t4=createTaskAttemptId(ts1,2,2,TaskType.MAP,1);
  TaskAttemptId t5=createTaskAttemptId(ts1,2,1,TaskType.MAP,3);
  TaskAttemptId t6=createTaskAttemptId(ts1,2,2,TaskType.MAP,2);
  // equals: only t6 is field-for-field identical to t1.
  assertTrue(t1.equals(t6));
  assertFalse(t1.equals(t2));
  assertFalse(t1.equals(t3));
  assertFalse(t1.equals(t5));
  // compareTo: MAP < REDUCE, then ordered by task id and attempt number.
  assertTrue(t1.compareTo(t6) == 0);
  assertTrue(t1.compareTo(t2) < 0);
  assertTrue(t1.compareTo(t3) < 0);
  assertTrue(t1.compareTo(t4) > 0);
  assertTrue(t1.compareTo(t5) > 0);
  // hashCode is consistent with equals.
  assertTrue(t1.hashCode() == t6.hashCode());
  assertFalse(t1.hashCode() == t2.hashCode());
  assertFalse(t1.hashCode() == t3.hashCode());
  assertFalse(t1.hashCode() == t5.hashCode());
  // toString: small numbers zero-padded, large numbers printed verbatim.
  TaskAttemptId t7=createTaskAttemptId(ts2,5463346,4326575,TaskType.REDUCE,54375);
  assertEquals("attempt_" + ts1 + "_0002_m_000002_2",t1.toString());
  assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375",t7.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the equals/compareTo/hashCode/toString contracts of TaskId:
 * equal ids agree on all four; ordering is by timestamp, job, task type,
 * then task number.
 */
@Test public void testTaskId(){
  // Idiom fix: uppercase 'L' long-literal suffix (lowercase 'l' is easily
  // misread as the digit 1).
  long ts1=1315890136000L;
  long ts2=1315890136001L;
  TaskId t1=createTaskId(ts1,1,2,TaskType.MAP);
  TaskId t2=createTaskId(ts1,1,2,TaskType.REDUCE);
  TaskId t3=createTaskId(ts1,1,1,TaskType.MAP);
  TaskId t4=createTaskId(ts1,1,2,TaskType.MAP);
  TaskId t5=createTaskId(ts2,1,1,TaskType.MAP);
  // equals: only t4 is field-for-field identical to t1.
  assertTrue(t1.equals(t4));
  assertFalse(t1.equals(t2));
  assertFalse(t1.equals(t3));
  assertFalse(t1.equals(t5));
  // compareTo: ordered by type, task number, and timestamp.
  assertTrue(t1.compareTo(t4) == 0);
  assertTrue(t1.compareTo(t2) < 0);
  assertTrue(t1.compareTo(t3) > 0);
  assertTrue(t1.compareTo(t5) < 0);
  // hashCode is consistent with equals.
  assertTrue(t1.hashCode() == t4.hashCode());
  assertFalse(t1.hashCode() == t2.hashCode());
  assertFalse(t1.hashCode() == t3.hashCode());
  assertFalse(t1.hashCode() == t5.hashCode());
  // toString: small numbers zero-padded, large numbers printed verbatim.
  TaskId t6=createTaskId(ts1,324151,54643747,TaskType.REDUCE);
  assertEquals("task_" + ts1 + "_0001_m_000002",t1.toString());
  assertEquals("task_" + ts1 + "_324151_r_54643747",t6.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the equals/compareTo/hashCode/toString contracts of JobId:
 * equal ids agree on all four; ordering is by timestamp then job number.
 */
@Test public void testJobId(){
  // Idiom fix: uppercase 'L' long-literal suffix (lowercase 'l' is easily
  // misread as the digit 1).
  long ts1=1315890136000L;
  long ts2=1315890136001L;
  JobId j1=createJobId(ts1,2);
  JobId j2=createJobId(ts1,1);
  JobId j3=createJobId(ts2,1);
  JobId j4=createJobId(ts1,2);
  // equals: only j4 is field-for-field identical to j1.
  assertTrue(j1.equals(j4));
  assertFalse(j1.equals(j2));
  assertFalse(j1.equals(j3));
  // compareTo: ordered by job number within a timestamp, then timestamp.
  assertTrue(j1.compareTo(j4) == 0);
  assertTrue(j1.compareTo(j2) > 0);
  assertTrue(j1.compareTo(j3) < 0);
  // hashCode is consistent with equals.
  assertTrue(j1.hashCode() == j4.hashCode());
  assertFalse(j1.hashCode() == j2.hashCode());
  assertFalse(j1.hashCode() == j3.hashCode());
  // toString: small job numbers zero-padded, large ones printed verbatim.
  JobId j5=createJobId(ts1,231415);
  assertEquals("job_" + ts1 + "_0002",j1.toString());
  assertEquals("job_" + ts1 + "_231415",j5.toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * On the last AM retry with a normal shutdown, exactly one job-end
 * notification must be delivered, carrying the job id and SUCCEEDED status.
 */
@Test public void testNotificationOnLastRetryNormalShutdown() throws Exception {
  final HttpServer2 httpServer=startHttpServer();
  // Last retry (attempt 2 of 2), normal shutdown path.
  final MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,true,this.getClass().getName(),true,2,true));
  doNothing().when(app).sysexit();
  final JobConf jobConf=new JobConf();
  jobConf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  final JobImpl jobImpl=(JobImpl)app.submit(jobConf);
  app.waitForInternalState(jobImpl,JobStateInternal.SUCCEEDED);
  app.shutDownJob();
  Assert.assertTrue(app.isLastAMRetry());
  // Exactly one notification with the final id and state.
  Assert.assertEquals(1,JobEndServlet.calledTimes);
  Assert.assertEquals("jobid=" + jobImpl.getID() + "&status=SUCCEEDED",JobEndServlet.requestUri.getQuery());
  Assert.assertEquals(JobState.SUCCEEDED.toString(),JobEndServlet.foundJobState);
  httpServer.stop();
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks that the job-end notification retry settings control both the
 * number of attempts made and the total time spent retrying against an
 * unreachable URL.
 */
@Test public void testNotifyRetries() throws InterruptedException {
  final JobConf conf=new JobConf();
  conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"0");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"1");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,"http://nonexistent");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"5000");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"5000");
  final JobReport report=mock(JobReport.class);
  // Single attempt with a 5s interval: one try, taking over 5 seconds.
  long elapsed=notifyAndTime(conf,report);
  Assert.assertEquals("Only 1 try was expected but was : " + this.notificationCount,1,this.notificationCount);
  Assert.assertTrue("Should have taken more than 5 seconds it took " + elapsed,elapsed > 5000);
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"3");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"3");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"3000");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"3000");
  // Three retries at 3s each: over 9 seconds total.
  elapsed=notifyAndTime(conf,report);
  Assert.assertEquals("Only 3 retries were expected but was : " + this.notificationCount,3,this.notificationCount);
  Assert.assertTrue("Should have taken more than 9 seconds it took " + elapsed,elapsed > 9000);
}
/** Resets the notification counter, applies conf, fires one notification, and returns elapsed millis. */
private long notifyAndTime(JobConf conf,JobReport report) throws InterruptedException {
  final long begin=System.currentTimeMillis();
  this.notificationCount=0;
  this.setConf(conf);
  this.notify(report);
  return System.currentTimeMillis() - begin;
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When unregistration fails on what was flagged as the last AM retry and the
 * job reboots, no job-end notification must be sent and the last-retry flag
 * must be cleared.
 */
@Test public void testNotificationOnLastRetryUnregistrationFailure() throws Exception {
  final HttpServer2 httpServer=startHttpServer();
  // Unregistration is set up to fail (third ctor arg false).
  final MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,2,false));
  app.isLastAMRetry=true;
  doNothing().when(app).sysexit();
  final JobConf jobConf=new JobConf();
  jobConf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  final JobImpl jobImpl=(JobImpl)app.submit(jobConf);
  app.waitForState(jobImpl,JobState.RUNNING);
  // Force a reboot and let the app wind down.
  app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(jobImpl,JobStateInternal.REBOOT);
  app.waitForServiceToStop(10000);
  // No notification may have been delivered.
  Assert.assertFalse(app.isLastAMRetry());
  Assert.assertEquals(0,JobEndServlet.calledTimes);
  Assert.assertNull(JobEndServlet.requestUri);
  Assert.assertNull(JobEndServlet.foundJobState);
  httpServer.stop();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When unregistration fails but this is NOT the last AM retry, no job-end
 * notification must be sent after a reboot-driven shutdown.
 */
@Test public void testAbsentNotificationOnNotLastRetryUnregistrationFailure() throws Exception {
  final HttpServer2 httpServer=startHttpServer();
  // Not the last retry (attempt 1 of max), failing unregistration.
  final MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,1,false));
  doNothing().when(app).sysexit();
  final JobConf jobConf=new JobConf();
  jobConf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  final JobImpl jobImpl=(JobImpl)app.submit(jobConf);
  app.waitForState(jobImpl,JobState.RUNNING);
  app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(jobImpl,JobStateInternal.REBOOT);
  app.shutDownJob();
  // NOTE(review): waiting for RUNNING again after shutDownJob looks odd —
  // presumably the externally-visible state stays RUNNING across a
  // non-final-retry reboot; confirm before changing.
  app.waitForState(jobImpl,JobState.RUNNING);
  Assert.assertFalse(app.isLastAMRetry());
  Assert.assertEquals(0,JobEndServlet.calledTimes);
  Assert.assertNull(JobEndServlet.requestUri);
  Assert.assertNull(JobEndServlet.foundJobState);
  httpServer.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that the Container object handed to the ContainerLauncher is the
 * very same instance assigned to the TaskAttempt (pass-through, no copy).
 */
@Test public void testContainerPassThrough() throws Exception {
  MRApp app=new MRApp(0,1,true,this.getClass().getName(),true){
    @Override protected ContainerLauncher createContainerLauncher( AppContext context){
      // Capture the container the launcher receives for the assertion below.
      return new MockContainerLauncher(){
        @Override public void handle( ContainerLauncherEvent event){
          if (event instanceof ContainerRemoteLaunchEvent) {
            containerObtainedByContainerLauncher=((ContainerRemoteLaunchEvent)event).getAllocatedContainer();
          }
          super.handle(event);
        }
      };
    }
  };
  Job job=app.submit(new Configuration());
  app.waitForState(job,JobState.SUCCEEDED);
  app.verifyCompleted();
  Collection tasks=job.getTasks().values();
  Collection taskAttempts=tasks.iterator().next().getAttempts().values();
  TaskAttemptImpl taskAttempt=(TaskAttemptImpl)taskAttempts.iterator().next();
  // Idiom fix: assertSame performs the same reference-identity check as
  // assertTrue(a == b) but reports both objects on failure.
  Assert.assertSame(containerObtainedByContainerLauncher,taskAttempt.container);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With both the start-commit marker and the commit-FAILURE marker present
 * in staging, AM startup must throw, force the FAILED state, and report a
 * failed status.
 */
@Test public void testMRAppMasterFailLock() throws IOException, InterruptedException {
  final String attemptIdStr="appattempt_1317529182569_0004_000002";
  final String contIdStr="container_1317529182569_0004_000002_1";
  final String user="TestAppMasterUser";
  final JobConf conf=new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
  final ApplicationAttemptId attemptId=ConverterUtils.toApplicationAttemptId(attemptIdStr);
  final JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Plant both the start-commit and the commit-failure markers.
  final FileSystem fs=FileSystem.get(conf);
  fs.create(MRApps.getStartJobCommitFile(conf,user,jobId)).close();
  fs.create(MRApps.getEndJobCommitFailureFile(conf,user,jobId)).close();
  final ContainerId contId=ConverterUtils.toContainerId(contIdStr);
  final MRAppMaster appMaster=new MRAppMasterTest(attemptId,contId,"host",-1,-1,System.currentTimeMillis(),false,false);
  boolean sawIOException=false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster,conf,user);
  }
  catch ( IOException e) {
    LOG.info("Caught expected Exception",e);
    sawIOException=true;
  }
  assertTrue(sawIOException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.FAILED,appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest)appMaster,"FAILED");
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With the staging directory removed entirely, AM startup must throw and
 * force the ERROR state.
 */
@Test public void testMRAppMasterMissingStaging() throws IOException, InterruptedException {
  final String attemptIdStr="appattempt_1317529182569_0004_000002";
  final String contIdStr="container_1317529182569_0004_000002_1";
  final String user="TestAppMasterUser";
  final JobConf conf=new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
  final ApplicationAttemptId attemptId=ConverterUtils.toApplicationAttemptId(attemptIdStr);
  // Ensure the staging directory is absent before the AM starts.
  final File staging=new File(stagingDir);
  if (staging.exists()) {
    FileUtils.deleteDirectory(staging);
  }
  final ContainerId contId=ConverterUtils.toContainerId(contIdStr);
  final MRAppMaster appMaster=new MRAppMasterTest(attemptId,contId,"host",-1,-1,System.currentTimeMillis(),false,false);
  boolean sawIOException=false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster,conf,user);
  }
  catch ( IOException e) {
    LOG.info("Caught expected Exception",e);
    sawIOException=true;
  }
  assertTrue(sawIOException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR,appMaster.forcedState);
  appMaster.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With both the start-commit marker and the commit-SUCCESS marker present in
 * staging, AM startup must throw but force the SUCCEEDED state and report it.
 */
@Test public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
  final String attemptIdStr="appattempt_1317529182569_0004_000002";
  final String contIdStr="container_1317529182569_0004_000002_1";
  final String user="TestAppMasterUser";
  final JobConf conf=new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
  final ApplicationAttemptId attemptId=ConverterUtils.toApplicationAttemptId(attemptIdStr);
  final JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Plant both the start-commit and the commit-success markers.
  final FileSystem fs=FileSystem.get(conf);
  fs.create(MRApps.getStartJobCommitFile(conf,user,jobId)).close();
  fs.create(MRApps.getEndJobCommitSuccessFile(conf,user,jobId)).close();
  final ContainerId contId=ConverterUtils.toContainerId(contIdStr);
  final MRAppMaster appMaster=new MRAppMasterTest(attemptId,contId,"host",-1,-1,System.currentTimeMillis(),false,false);
  boolean sawIOException=false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster,conf,user);
  }
  catch ( IOException e) {
    LOG.info("Caught expected Exception",e);
    sawIOException=true;
  }
  assertTrue(sawIOException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.SUCCEEDED,appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest)appMaster,"SUCCEEDED");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With only the start-commit marker present (commit was interrupted
 * mid-flight), AM startup must throw, force the ERROR state, and report a
 * failed status.
 */
@Test public void testMRAppMasterMidLock() throws IOException, InterruptedException {
  final String attemptIdStr="appattempt_1317529182569_0004_000002";
  final String contIdStr="container_1317529182569_0004_000002_1";
  final String user="TestAppMasterUser";
  final JobConf conf=new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
  final ApplicationAttemptId attemptId=ConverterUtils.toApplicationAttemptId(attemptIdStr);
  final JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Plant only the start-commit marker — no end marker of either kind.
  final FileSystem fs=FileSystem.get(conf);
  fs.create(MRApps.getStartJobCommitFile(conf,user,jobId)).close();
  final ContainerId contId=ConverterUtils.toContainerId(contIdStr);
  final MRAppMaster appMaster=new MRAppMasterTest(attemptId,contId,"host",-1,-1,System.currentTimeMillis(),false,false);
  boolean sawIOException=false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster,conf,user);
  }
  catch ( IOException e) {
    LOG.info("Caught expected Exception",e);
    sawIOException=true;
  }
  assertTrue(sawIOException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR,appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest)appMaster,"FAILED");
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A user granted only the VIEW_JOB ACL must be able to view the job but must
 * be rejected (AccessControlException) on every modify operation: kill job,
 * kill task, kill task attempt, and fail task attempt.
 */
@Test public void testViewAclOnlyCannotModify() throws Exception {
  final MRAppWithClientService app=new MRAppWithClientService(1,0,false);
  final Configuration conf=new Configuration();
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  conf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"viewonlyuser");
  Job job=app.submit(conf);
  app.waitForState(job,JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct",1,job.getTasks().size());
  Iterator it=job.getTasks().values().iterator();
  Task task=it.next();
  app.waitForState(task,TaskState.RUNNING);
  TaskAttempt attempt=task.getAttempts().values().iterator().next();
  app.waitForState(attempt,TaskAttemptState.RUNNING);
  UserGroupInformation viewOnlyUser=UserGroupInformation.createUserForTesting("viewonlyuser",new String[]{});
  Assert.assertTrue("viewonlyuser cannot view job",job.checkAccess(viewOnlyUser,JobACL.VIEW_JOB));
  Assert.assertFalse("viewonlyuser can modify job",job.checkAccess(viewOnlyUser,JobACL.MODIFY_JOB));
  // Talk to the client service as the view-only user.
  MRClientProtocol client=viewOnlyUser.doAs(new PrivilegedExceptionAction(){
    @Override public MRClientProtocol run() throws Exception {
      YarnRPC rpc=YarnRPC.create(conf);
      return (MRClientProtocol)rpc.getProxy(MRClientProtocol.class,app.clientService.getBindAddress(),conf);
    }
  }
  );
  KillJobRequest killJobRequest=recordFactory.newRecordInstance(KillJobRequest.class);
  killJobRequest.setJobId(app.getJobId());
  try {
    client.killJob(killJobRequest);
    fail("viewonlyuser killed job");
  }
  catch ( AccessControlException e) {
    // expected: modify operations must be rejected for a view-only user
  }
  KillTaskRequest killTaskRequest=recordFactory.newRecordInstance(KillTaskRequest.class);
  killTaskRequest.setTaskId(task.getID());
  try {
    client.killTask(killTaskRequest);
    fail("viewonlyuser killed task");
  }
  catch ( AccessControlException e) {
    // expected
  }
  KillTaskAttemptRequest killTaskAttemptRequest=recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
  killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.killTaskAttempt(killTaskAttemptRequest);
    fail("viewonlyuser killed task attempt");
  }
  catch ( AccessControlException e) {
    // expected
  }
  FailTaskAttemptRequest failTaskAttemptRequest=recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
  failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.failTaskAttempt(failTaskAttemptRequest);
    // BUG FIX: message previously said "killed task attempt" (copy-paste from
    // the kill case); this branch exercises failTaskAttempt.
    fail("viewonlyuser failed task attempt");
  }
  catch ( AccessControlException e) {
    // expected
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. A speculative (second) attempt is added for
 * the 1st map and the original attempt completes successfully. The AM is
 * stopped after the first task finishes, restarted with recovery enabled,
 * and the job succeeds in the second generation with the recovered job and
 * task start/finish times preserved.
 * @throws Exception
 */
@Test public void testSpeculative() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
// First-generation AM: 2 maps, 1 reduce, job history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Inject a speculative attempt for map 1.
app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT));
int timeOut=0;
// Poll (up to ~10s) until the speculative attempt is visible.
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator t1it=mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1=t1it.next();
TaskAttempt task1Attempt2=t1it.next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId=task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Mark the speculative attempt's container as launched so it reaches RUNNING.
app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount));
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Let the original attempt win; the task should succeed.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.SUCCEEDED);
// Snapshot times before the crash; recovery must reproduce them exactly.
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Second-generation AM with recovery enabled; replays history from gen 1.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map 1 must be recovered as SUCCEEDED without re-running.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovered timestamps must match the first generation's values.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
// Both AM generations must be reported, in attempt order.
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
// AM start times must fall within the wall-clock window observed above.
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
 * completely disappears because of failed launch, one attempt gets killed and
 * one attempt succeeds. AM crashes after the first tasks finishes and
 * recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test public void testCrashed() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
// First-generation AM: 2 maps, 1 reduce, job history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Attempt 1: fail it outright.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_FAILMSG));
app.waitForState(task1Attempt1,TaskAttemptState.FAILED);
int timeOut=0;
// Poll (up to ~20s) until the replacement attempt is scheduled.
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(2,mapTask1.getAttempts().size());
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
TaskAttempt task1Attempt2=itr.next();
// Attempt 2: container launch failure makes it disappear.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt2.getID(),TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
app.waitForState(task1Attempt2,TaskAttemptState.FAILED);
timeOut=0;
while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(3,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
TaskAttempt task1Attempt3=itr.next();
app.waitForState(task1Attempt3,TaskAttemptState.RUNNING);
// Attempt 3: kill it.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt3.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(task1Attempt3,TaskAttemptState.KILLED);
timeOut=0;
while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(4,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
itr.next();
TaskAttempt task1Attempt4=itr.next();
app.waitForState(task1Attempt4,TaskAttemptState.RUNNING);
// Attempt 4: let it succeed; the task reaches SUCCEEDED.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt4.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
// Snapshot times before the crash; recovery must reproduce them exactly.
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Second-generation AM with recovery enabled; replays history from gen 1.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map 1 must be recovered as SUCCEEDED without re-running.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovered timestamps must match the first generation's values.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
// Both AM generations must be reported, in attempt order.
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
// AM start times must fall within the wall-clock window observed above.
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the job reaches RUNNING and the AM shuts down on its last retry,
 * the staging directory must be deleted exactly once.
 */
@Test public void testDeletionofStaging() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
// Stub the filesystem so staging exists and deletes report success.
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String shortUser=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingArea=MRApps.getStagingAreaDir(conf,shortUser);
when(fs.exists(stagingArea)).thenReturn(true);
ApplicationId applicationId=ApplicationId.newInstance(System.currentTimeMillis(),0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
JobId jobId=recordFactory.newRecordInstance(JobId.class);
jobId.setAppId(applicationId);
ContainerAllocator allocator=mock(ContainerAllocator.class);
// Sanity: the default max-attempts must allow more than one retry.
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
// Run and shut down an AM whose job finished in the RUNNING state.
MRAppMaster master=new TestMRApp(appAttemptId,allocator,JobStateInternal.RUNNING,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
master.init(conf);
master.start();
master.shutDownJob();
// This attempt is the last AM retry, so staging cleanup must happen.
Assert.assertEquals(true,((TestMRApp)master).getTestIsLastAMRetry());
verify(fs).delete(stagingJobPath,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the AM shuts down from the REBOOT state it is not the last retry,
 * so the staging directory must NOT be deleted.
 */
@Test(timeout=30000) public void testNoDeletionofStagingOnReboot() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
// Stub the filesystem so staging exists and deletes report success.
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String shortUser=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingArea=MRApps.getStagingAreaDir(conf,shortUser);
when(fs.exists(stagingArea)).thenReturn(true);
ApplicationId applicationId=ApplicationId.newInstance(System.currentTimeMillis(),0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
ContainerAllocator allocator=mock(ContainerAllocator.class);
// Sanity: the default max-attempts must allow more than one retry.
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
// Run and shut down an AM whose job ended in the REBOOT state.
MRAppMaster master=new TestMRApp(appAttemptId,allocator,JobStateInternal.REBOOT,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
master.init(conf);
master.start();
master.shutDownJob();
// A reboot is not the final retry, so no staging cleanup may occur.
Assert.assertEquals(false,((TestMRApp)master).getTestIsLastAMRetry());
verify(fs,times(0)).delete(stagingJobPath,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * A successful job commit through CommitterEventHandler: the handler must
 * fire a JobCommitCompletedEvent, create the start- and end-success commit
 * marker files (but not the failure marker), and invoke the committer once.
 */
@Test public void testBasic() throws Exception {
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
// Blocks until the handler emits its completion event.
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitCompletedEvent);
FileSystem fs=FileSystem.get(conf);
// Success path: start + end-success markers exist, failure marker does not.
assertTrue(startCommitFile.toString(),fs.exists(startCommitFile));
assertTrue(endCommitSuccessFile.toString(),fs.exists(endCommitSuccessFile));
assertFalse(endCommitFailureFile.toString(),fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler so its thread does not leak into other tests.
handler.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * A failing job commit through CommitterEventHandler: when the committer
 * throws, the handler must fire a JobCommitFailedEvent and create the
 * start- and end-failure commit marker files (but not the success marker).
 */
@Test public void testFailure() throws Exception {
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
// Force the commit to fail inside the handler.
doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter).commitJob(any(JobContext.class));
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
// Blocks until the handler emits its completion event.
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitFailedEvent);
FileSystem fs=FileSystem.get(conf);
// Failure path: start + end-failure markers exist, success marker does not.
assertTrue(fs.exists(startCommitFile));
assertFalse(fs.exists(endCommitSuccessFile));
assertTrue(fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler so its thread does not leak into other tests.
handler.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When InitTransition fails while creating splits, the job must transition
 * back to NEW and the thrown exception (type and message) must appear in
 * the job diagnostics.
 */
@Test public void testMetaInfoSizeOverMax() throws Exception {
Configuration conf=new Configuration();
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
MRAppMetrics mrAppMetrics=MRAppMetrics.create();
JobImpl job=new JobImpl(jobId,ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,0),0),conf,mock(EventHandler.class),null,new JobTokenSecretManager(),new Credentials(),null,null,mrAppMetrics,null,true,null,0,null,null,null,null);
// Stubbed transition whose split creation always fails.
InitTransition initTransition=new InitTransition(){
@Override protected TaskSplitMetaInfo[] createSplits( JobImpl job, JobId jobId){
throw new YarnRuntimeException(EXCEPTIONMSG);
}
}
;
JobEvent mockJobEvent=mock(JobEvent.class);
JobStateInternal jobSI=initTransition.transition(job,mockJobEvent);
// assertEquals prints expected vs. actual on failure, unlike
// assertTrue(x.equals(y)) which only reports "false".
Assert.assertEquals("When init fails, return value from InitTransition.transition should equal NEW.",JobStateInternal.NEW,jobSI);
Assert.assertTrue("Job diagnostics should contain YarnRuntimeException",job.getDiagnostics().toString().contains("YarnRuntimeException"));
Assert.assertTrue("Job diagnostics should contain " + EXCEPTIONMSG,job.getDiagnostics().toString().contains(EXCEPTIONMSG));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises the uber-task decision across several configurations: the
 * enable flag, the max-reduces/max-maps limits, and the reduce resource
 * requirements all influence whether a job runs uberized.
 */
@Test public void testUberDecision() throws Exception {
// Default configuration: uber mode is off.
Configuration config=new Configuration();
boolean uberized=testUberDecision(config);
Assert.assertFalse(uberized);
// Enabling the flag alone turns uber mode on.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
uberized=testUberDecision(config);
Assert.assertTrue(uberized);
// One reduce but a max-reduces cap of 0 disables uberization.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,0);
config.setInt(MRJobConfig.NUM_REDUCES,1);
uberized=testUberDecision(config);
Assert.assertFalse(uberized);
// One reduce within a max-reduces cap of 1 keeps uberization on.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,1);
config.setInt(MRJobConfig.NUM_REDUCES,1);
uberized=testUberDecision(config);
Assert.assertTrue(uberized);
// A max-maps cap of 1 disables uberization for this job.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS,1);
uberized=testUberDecision(config);
Assert.assertFalse(uberized);
// With zero reduces, reduce resource settings do not block uberization.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.NUM_REDUCES,0);
config.setInt(MRJobConfig.REDUCE_MEMORY_MB,2048);
config.setInt(MRJobConfig.REDUCE_CPU_VCORES,10);
uberized=testUberDecision(config);
Assert.assertTrue(uberized);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * A job with no tasks (0 reduces, stubbed with 0 maps) should go straight
 * from INITED to SUCCEEDED on start, and the submitted-event must carry the
 * configured workflow id/name/node/adjacency/tags attributes.
 */
@Test public void testJobNoTasks(){
Configuration conf=new Configuration();
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
// Workflow attributes that must be echoed in the JobSubmittedEvent.
conf.set(MRJobConfig.WORKFLOW_ID,"testId");
conf.set(MRJobConfig.WORKFLOW_NAME,"testName");
conf.set(MRJobConfig.WORKFLOW_NODE_NAME,"testNodeName");
conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key1","value1");
conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key2","value2");
conf.set(MRJobConfig.WORKFLOW_TAGS,"tag1,tag2");
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer=mock(OutputCommitter.class);
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
// Handler that verifies the workflow attributes on the submitted event.
JobSubmittedEventHandler jseHandler=new JobSubmittedEventHandler("testId","testName","testNodeName","\"key2\"=\"value2\" \"key1\"=\"value1\" ","tag1,tag2");
dispatcher.register(EventType.class,jseHandler);
// 0 tasks: the stubbed job has no maps and NUM_REDUCES is 0.
JobImpl job=createStubbedJob(conf,dispatcher,0,null);
job.handle(new JobEvent(job.getID(),JobEventType.JOB_INIT));
assertJobState(job,JobStateInternal.INITED);
job.handle(new JobStartEvent(job.getID()));
// With no tasks to run, starting the job completes it immediately.
assertJobState(job,JobStateInternal.SUCCEEDED);
dispatcher.stop();
commitHandler.stop();
try {
Assert.assertTrue(jseHandler.getAssertValue());
}
catch ( InterruptedException e) {
// getAssertValue waits for the event; an interrupt means it never arrived.
Assert.fail("Workflow related attributes are not tested properly");
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Diagnostic updates must be reflected in the job report, both for a job
 * in its initial state and for a job that has been sent a kill event.
 */
@Test public void testReportDiagnostics() throws Exception {
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
final String diagMsg="some diagnostic message";
final JobDiagnosticsUpdateEvent diagUpdateEvent=new JobDiagnosticsUpdateEvent(jobId,diagMsg);
MRAppMetrics mrAppMetrics=MRAppMetrics.create();
AppContext mockContext=mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
JobImpl job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,mrAppMetrics,null,true,null,0,null,mockContext,null,null);
// Case 1: diagnostics delivered to a freshly created job.
job.handle(diagUpdateEvent);
String diagnostics=job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,mrAppMetrics,null,true,null,0,null,mockContext,null,null);
// Case 2: diagnostics delivered after the job has been asked to kill.
job.handle(new JobEvent(jobId,JobEventType.JOB_KILL));
job.handle(diagUpdateEvent);
diagnostics=job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises JobImpl.checkAccess for VIEW_JOB across ACL configurations:
 * empty ACL, explicit user, wildcard, ACLs disabled, and a null operation.
 * The job owner (user1) must always have access; user2's access depends on
 * the ACL.
 */
@Test public void testCheckAccess(){
String user1=System.getProperty("user.name");
String user2=user1 + "1234";
UserGroupInformation ugi1=UserGroupInformation.createRemoteUser(user1);
UserGroupInformation ugi2=UserGroupInformation.createRemoteUser(user2);
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
// Case 1: ACLs on, empty view ACL -> only the owner may view.
Configuration conf1=new Configuration();
conf1.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
conf1.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl job1=new JobImpl(jobId,null,conf1,null,null,null,null,null,null,null,null,true,user1,0,null,null,null,null);
Assert.assertTrue(job1.checkAccess(ugi1,JobACL.VIEW_JOB));
Assert.assertFalse(job1.checkAccess(ugi2,JobACL.VIEW_JOB));
// Case 2: ACLs on, view ACL grants user2 -> both users may view.
Configuration conf2=new Configuration();
conf2.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
conf2.set(MRJobConfig.JOB_ACL_VIEW_JOB,user2);
JobImpl job2=new JobImpl(jobId,null,conf2,null,null,null,null,null,null,null,null,true,user1,0,null,null,null,null);
Assert.assertTrue(job2.checkAccess(ugi1,JobACL.VIEW_JOB));
Assert.assertTrue(job2.checkAccess(ugi2,JobACL.VIEW_JOB));
// Case 3: ACLs on, wildcard view ACL -> everyone may view.
Configuration conf3=new Configuration();
conf3.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
conf3.set(MRJobConfig.JOB_ACL_VIEW_JOB,"*");
JobImpl job3=new JobImpl(jobId,null,conf3,null,null,null,null,null,null,null,null,true,user1,0,null,null,null,null);
Assert.assertTrue(job3.checkAccess(ugi1,JobACL.VIEW_JOB));
Assert.assertTrue(job3.checkAccess(ugi2,JobACL.VIEW_JOB));
// Case 4: ACLs disabled -> the ACL value is ignored and everyone may view.
Configuration conf4=new Configuration();
conf4.setBoolean(MRConfig.MR_ACLS_ENABLED,false);
conf4.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl job4=new JobImpl(jobId,null,conf4,null,null,null,null,null,null,null,null,true,user1,0,null,null,null,null);
Assert.assertTrue(job4.checkAccess(ugi1,JobACL.VIEW_JOB));
Assert.assertTrue(job4.checkAccess(ugi2,JobACL.VIEW_JOB));
// Case 5: a null operation is permitted regardless of the ACL.
Configuration conf5=new Configuration();
conf5.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
conf5.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl job5=new JobImpl(jobId,null,conf5,null,null,null,null,null,null,null,null,true,user1,0,null,null,null,null);
Assert.assertTrue(job5.checkAccess(ugi1,null));
Assert.assertTrue(job5.checkAccess(ugi2,null));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With cross-platform submission enabled, the task container command line
 * must match the expected cross-platform form exactly, and the task
 * environment must carry default HADOOP_ROOT_LOGGER and (empty)
 * HADOOP_CLIENT_OPTS entries.
 */
@Test(timeout=30000) public void testCommandLine() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Full expected command line, with JAVA_HOME/PWD in cross-platform form.
Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine);
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * User-supplied map-task environment settings must flow into the container
 * environment: HADOOP_CLIENT_OPTS is taken from MAPRED_MAP_TASK_ENV,
 * HADOOP_ROOT_LOGGER derives from the configured map log level, and an
 * explicit HADOOP_ROOT_LOGGER in the task env overrides the default.
 */
@Test public void testEnvironmentVariables() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_CLIENT_OPTS=test");
conf.setStrings(MRJobConfig.MAP_LOG_LEVEL,"WARN");
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Log level WARN must map to HADOOP_ROOT_LOGGER=WARN,console.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("WARN,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("test",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
// Second run: an explicit HADOOP_ROOT_LOGGER in the task env wins.
app=new MyMRApp(1,0,true,this.getClass().getName(),true);
conf=new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_ROOT_LOGGER=trace");
job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("trace",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Configures two auxiliary shuffle provider services and verifies that the
 * container launch context's service-data map contains entries for both.
 */
@Test public void testShuffleProviders() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
// Register both test shuffle handlers as NM aux services.
jobConf.set(YarnConfiguration.NM_AUX_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
String serviceName=TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
String serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler1.class.getName());
serviceName=TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler2.class.getName());
// The job requests both services as shuffle providers.
jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
Credentials credentials=new Credentials();
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(null,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Map serviceDataMap=launchCtx.getServiceData();
Assert.assertNotNull("TestShuffleHandler1 is missing",serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
Assert.assertNotNull("TestShuffleHandler2 is missing",serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
// NOTE(review): expects 3 entries — presumably the two test handlers plus
// one default entry added by createContainerLaunchContext; confirm there.
Assert.assertTrue("mismatch number of services in map",serviceDataMap.size() == 3);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A succeeded map attempt that later receives TA_TOO_MANY_FETCH_FAILURE
 * must transition to FAILED while keeping its original finish time
 * unchanged.
 */
@Test public void testFetchFailureAttemptFinishTime() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,mock(Token.class),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt through schedule -> assign -> launch -> done -> cleaned.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
// JUnit convention: assertEquals(message, expected, actual). The original
// code passed the arguments swapped, yielding misleading failure output.
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
assertTrue("Task Attempt finish time is not greater than 0",taImpl.getFinishTime() > 0);
// Snapshot the finish time, then wait so a (buggy) re-stamp would differ.
Long finishTime=taImpl.getFinishTime();
Thread.sleep(5);
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in Too Many Fetch Failure state",TaskAttemptState.FAILED,taImpl.getState());
assertEquals("After TA_TOO_MANY_FETCH_FAILURE," + " Task attempt finish time is not the same ",finishTime,Long.valueOf(taImpl.getFinishTime()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDoubleTooManyFetchFailure() throws Exception {
  // Verifies that a second TA_TOO_MANY_FETCH_FAILURE delivered to an attempt
  // that has already transitioned to FAILED is ignored: the attempt stays
  // FAILED and no InternalError event is raised.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Drive the attempt through a normal successful lifecycle first.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original calls had the arguments swapped, which produces
  // misleading failure messages.
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  // First fetch-failure notification: SUCCEEDED -> FAILED.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state",
      TaskAttemptState.FAILED, taImpl.getState());
  // Second (duplicate) notification must be a no-op.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state, still",
      TaskAttemptState.FAILED, taImpl.getState());
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testLaunchFailedWhileKilling() throws Exception {
  // A container-launch failure arriving while the attempt is being killed
  // must not trip an internal state-machine error, and locality recorded at
  // assignment time must survive the kill path.
  ApplicationId applicationId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 0);
  JobId jobId = MRBuilderUtils.newJobId(applicationId, 1);
  TaskId mapTaskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId mapAttemptId = MRBuilderUtils.newTaskAttemptId(mapTaskId, 0);

  // Mock out the collaborators the attempt needs at construction time.
  Path jobFile = mock(Path.class);
  MockEventHandler handler = new MockEventHandler();
  TaskAttemptListener listener = mock(TaskAttemptListener.class);
  when(listener.getAddress())
      .thenReturn(new InetSocketAddress("localhost", 0));

  JobConf conf = new JobConf();
  conf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  conf.setBoolean("fs.file.impl.disable.cache", true);
  conf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  conf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  // A single split hosted on the same node the container will run on, so
  // the assignment is NODE_LOCAL.
  TaskSplitMetaInfo splitInfo = mock(TaskSplitMetaInfo.class);
  when(splitInfo.getLocations()).thenReturn(new String[]{"127.0.0.1"});

  TaskAttemptImpl attempt = new MapTaskAttemptImpl(mapTaskId, 1, handler,
      jobFile, 1, splitInfo, conf, listener, new Token(), new Credentials(),
      new SystemClock(), null);

  NodeId nodeId = NodeId.newInstance("127.0.0.1", 0);
  ContainerId containerId = ContainerId.newInstance(applicationAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(containerId);
  when(container.getNodeId()).thenReturn(nodeId);

  // Schedule, assign, then kill before launch completes; the launch-failed
  // event lands after the container has already been cleaned.
  attempt.handle(new TaskAttemptEvent(mapAttemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  attempt.handle(new TaskAttemptContainerAssignedEvent(mapAttemptId,
      container, mock(Map.class)));
  attempt.handle(new TaskAttemptEvent(mapAttemptId,
      TaskAttemptEventType.TA_KILL));
  attempt.handle(new TaskAttemptEvent(mapAttemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  attempt.handle(new TaskAttemptEvent(mapAttemptId,
      TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));

  assertFalse(handler.internalError);
  assertEquals("Task attempt is not assigned on the local node",
      Locality.NODE_LOCAL, attempt.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testTooManyFetchFailureAfterKill() throws Exception {
  // Verifies that a TA_TOO_MANY_FETCH_FAILURE delivered after the attempt was
  // killed is ignored: the attempt stays KILLED and no InternalError occurs.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, mock(Token.class),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Normal successful lifecycle, then a kill.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original calls had the arguments swapped.
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertEquals("Task attempt is not in KILLED state",
      TaskAttemptState.KILLED, taImpl.getState());
  // A fetch-failure report arriving after the kill must be a no-op.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in KILLED state, still",
      TaskAttemptState.KILLED, taImpl.getState());
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testContainerCleanedWhileCommitting() throws Exception {
  // Verifies that TA_CONTAINER_CLEANED arriving while the attempt is in
  // COMMIT_PENDING does not trip an InternalError, and that an attempt with
  // no split locations is recorded as OFF_SWITCH.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  // Deliberately no split locations: any assignment is off-switch.
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original call had the arguments swapped.
  assertEquals("Task attempt is not in commit pending state",
      TaskAttemptState.COMMIT_PENDING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally",
      Locality.OFF_SWITCH, taImpl.getLocality());
}
BooleanVerifier
@Test public void testAppDiognosticEventOnUnassignedTask() throws Exception {
  // Verifies that a diagnostics-update event delivered while the attempt is
  // still UNASSIGNED (scheduled but no container yet) is handled without
  // raising an InternalError.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed unused mocks (Container/NodeId/ContainerId/Resource): no
  // container is ever assigned in this scenario, so none of them were used.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  // Schedule only — the attempt remains unassigned when diagnostics arrive.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId, "Task got killed"));
  // Message corrected: this scenario exercises an UNASSIGNED task, not an
  // assigned one.
  assertFalse("InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on unassigned task",
      eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testContainerKillWhileCommitPending() throws Exception {
  // Verifies that a TA_KILL delivered while the attempt is COMMIT_PENDING
  // moves it into the container-cleanup phase of the kill path without an
  // InternalError.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  // Container on a different host than the split location.
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original call had the arguments swapped.
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
  assertEquals("Task should be in COMMIT_PENDING state",
      TaskAttemptStateInternal.COMMIT_PENDING, taImpl.getInternalState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertFalse("InternalError occurred trying to handle TA_KILL",
      eventHandler.internalError);
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
BooleanVerifier
@Test public void testAppDiognosticEventOnNewTask() throws Exception {
  // Verifies that a diagnostics-update event delivered to a brand-NEW attempt
  // (no TA_SCHEDULE has been sent) is handled without an InternalError.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed unused mocks (Container/NodeId/ContainerId/Resource): the attempt
  // never leaves the NEW state in this scenario, so none of them were used.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  // No TA_SCHEDULE: the diagnostics event hits the attempt while it is NEW.
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId, "Task got killed"));
  // Message corrected: this scenario exercises a NEW task, not an assigned
  // one.
  assertFalse("InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on new task",
      eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testContainerCleanedWhileRunning() throws Exception {
  // Verifies that TA_CONTAINER_CLEANED arriving while the attempt is RUNNING
  // does not trip an InternalError, and that an assignment on a different
  // host (same rack by default topology) is recorded as RACK_LOCAL.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  // Container host differs from the split location -> rack-local assignment.
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original call had the arguments swapped.
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local rack",
      Locality.RACK_LOCAL, taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testContainerKillWhileRunning() throws Exception {
  // Verifies that TA_KILL arriving while the attempt is RUNNING moves it into
  // the kill-side container-cleanup state without an InternalError.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  // Removed an unused Resource mock and its getMemory() stubbing: nothing in
  // this test consumed them.
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  // JUnit's assertEquals(message, expected, actual) takes the EXPECTED value
  // first; the original call had the arguments swapped.
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertFalse("InternalError occurred trying to handle TA_KILL",
      eventHandler.internalError);
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAttemptContainerRequest() throws Exception {
final Text SECRET_KEY_ALIAS=new Text("secretkeyalias");
final byte[] SECRET_KEY=("secretkey").getBytes();
Map acls=new HashMap(1);
acls.put(ApplicationAccessType.VIEW_APP,"otheruser");
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(jobConf);
Credentials credentials=new Credentials();
credentials.addSecretKey(SECRET_KEY_ALIAS,SECRET_KEY);
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(acls,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Assert.assertEquals("ACLs mismatch",acls,launchCtx.getApplicationACLs());
Credentials launchCredentials=new Credentials();
DataInputByteBuffer dibb=new DataInputByteBuffer();
dibb.reset(launchCtx.getTokens());
launchCredentials.readTokenStorageStream(dibb);
for ( Token extends TokenIdentifier> token : credentials.getAllTokens()) {
Token extends TokenIdentifier> launchToken=launchCredentials.getToken(token.getService());
Assert.assertNotNull("Token " + token.getService() + " is missing",launchToken);
Assert.assertEquals("Token " + token.getService() + " mismatch",token,launchToken);
}
Assert.assertNotNull("Secret key missing",launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
Assert.assertTrue("Secret key mismatch",Arrays.equals(SECRET_KEY,launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
BooleanVerifier
@Test public void testKillDuringTaskAttemptCommit(){
  // An attempt that is killed while committing must lose its permission to
  // commit.
  mockTask = createMockTask(TaskType.REDUCE);
  TaskId reduceTaskId = getNewTaskID();
  scheduleTaskAttempt(reduceTaskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());

  // Move the attempt into the commit window and remember which attempt was
  // granted the commit.
  updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  TaskAttemptId committingAttempt = getLastAttempt().getAttemptId();

  // Kill it mid-commit; canCommit must now be denied for that attempt.
  updateLastAttemptState(TaskAttemptState.KILLED);
  killRunningTaskAttempt(committingAttempt);
  assertFalse(mockTask.canCommit(committingAttempt));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testFailureDuringTaskAttemptCommit(){
  // When the committing attempt fails, a replacement attempt is started; only
  // the replacement may commit and the task still ends up SUCCEEDED.
  mockTask = createMockTask(TaskType.MAP);
  TaskId mapTaskId = getNewTaskID();
  scheduleTaskAttempt(mapTaskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());

  // First attempt reaches the commit window, then fails mid-commit.
  updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.FAILED);
  failRunningTaskAttempt(getLastAttempt().getAttemptId());

  // The failure spawns a second attempt.
  assertEquals(2, taskAttempts.size());

  // Second attempt succeeds and commits.
  updateLastAttemptState(TaskAttemptState.SUCCEEDED);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
      TaskEventType.T_ATTEMPT_SUCCEEDED));

  assertFalse("First attempt should not commit",
      mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
  assertTrue("Second attempt should commit",
      mockTask.canCommit(getLastAttempt().getAttemptId()));
  assertTaskSucceededState();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testSlowNM() throws Exception {
// Verifies that when the NodeManager is too slow to accept a container
// launch, the launch RPC times out, the single allowed attempt fails, and
// the timeout shows up in the attempt's diagnostics.
conf=new Configuration();
// Only one map attempt so a single launch failure fails the whole job.
int maxAttempts=1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
// 3s command timeout: the DummyContainerManager below is expected to stall
// past this, producing the SocketTimeoutException asserted later.
conf.setInt("yarn.rpc.nm-command-timeout",3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL,HadoopYarnProtoRPC.class.getName());
YarnRPC rpc=YarnRPC.create(conf);
String bindAddr="localhost:0";
InetSocketAddress addr=NetUtils.createSocketAddr(bindAddr);
// Token-based auth between AM and the fake NM requires a shared master key.
NMTokenSecretManagerInNM tokenSecretManager=new NMTokenSecretManagerInNM();
MasterKey masterKey=Records.newRecord(MasterKey.class);
masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
tokenSecretManager.setMasterKey(masterKey);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"token");
// Stand-in NM that never responds in time to launch requests.
server=rpc.getServer(ContainerManagementProtocol.class,new DummyContainerManager(),addr,conf,tokenSecretManager,1);
server.start();
MRApp app=new MRAppWithSlowNM(tokenSecretManager);
try {
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
app.waitForState(task,TaskState.SCHEDULED);
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
TaskAttempt attempt=attempts.values().iterator().next();
// The attempt gets ASSIGNED but the launch RPC then times out, which
// should drive the whole job to FAILED (maxAttempts == 1).
app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED);
app.waitForState(job,JobState.FAILED);
String diagnostics=attempt.getDiagnostics().toString();
LOG.info("attempt.getDiagnostics: " + diagnostics);
// Diagnostics must name the failed container and the RPC timeout cause.
Assert.assertTrue(diagnostics.contains("Container launch failed for " + "container_0_0000_01_000000 : "));
Assert.assertTrue(diagnostics.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
}
finally {
// Always tear down the RPC server and the app, even on assertion failure.
server.stop();
app.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testCompletedTasksRecalculateSchedule() throws Exception {
// Verifies that the reduce schedule is recalculated only when the number of
// completed maps changes between allocator heartbeats.
LOG.info("Running testCompletedTasksRecalculateSchedule");
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
// DrainDispatcher lets the test block until all queued RM events are
// processed, so each await() below is a synchronization point.
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
// Mocked job: 10 maps / 10 reduces, initially zero completed maps.
Job job=mock(Job.class);
when(job.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
doReturn(10).when(job).getTotalMaps();
doReturn(10).when(job).getTotalReduces();
doReturn(0).when(job).getCompletedMaps();
RecalculateContainerAllocator allocator=new RecalculateContainerAllocator(rm,conf,appAttemptId,job);
allocator.schedule();
// Reset the flag after the first schedule; with completed-maps unchanged,
// the next schedule must NOT recalculate.
allocator.recalculatedReduceSchedule=false;
allocator.schedule();
Assert.assertFalse("Unexpected recalculate of reduce schedule",allocator.recalculatedReduceSchedule);
// Completing a map must trigger a recalculation on the next schedule.
doReturn(1).when(job).getCompletedMaps();
allocator.schedule();
Assert.assertTrue("Expected recalculate of reduce schedule",allocator.recalculatedReduceSchedule);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testHeartbeatHandler() throws Exception {
// Verifies that the allocator's heartbeat thread fires on clock advances
// (recording the heartbeat time) and that runOnNextHeartbeat callbacks are
// invoked on the following heartbeat.
LOG.info("Running testHeartbeatHandler");
Configuration conf=new Configuration();
// 1 ms AM->RM heartbeat interval so the test advances quickly.
conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS,1);
// ControlledClock lets the test advance time explicitly.
ControlledClock clock=new ControlledClock(new SystemClock());
AppContext appContext=mock(AppContext.class);
when(appContext.getClock()).thenReturn(clock);
when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1,1));
// Stub out RM registration, the scheduler proxy, and the heartbeat body so
// only the heartbeat *loop* mechanics are under test.
RMContainerAllocator allocator=new RMContainerAllocator(mock(ClientService.class),appContext,new NoopAMPreemptionPolicy()){
@Override protected void register(){
}
@Override protected ApplicationMasterProtocol createSchedulerProxy(){
return mock(ApplicationMasterProtocol.class);
}
@Override protected synchronized void heartbeat() throws Exception {
}
}
;
allocator.init(conf);
allocator.start();
// Advance the clock, then poll (up to 5s) until the heartbeat thread
// observes the new time.
clock.setTime(5);
int timeToWaitMs=5000;
while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals(5,allocator.getLastHeartbeatTime());
// Second clock advance: same poll-until-observed pattern.
clock.setTime(7);
timeToWaitMs=5000;
while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals(7,allocator.getLastHeartbeatTime());
// Register a one-shot callback; it must run on the next heartbeat.
final AtomicBoolean callbackCalled=new AtomicBoolean(false);
allocator.runOnNextHeartbeat(new Runnable(){
@Override public void run(){
callbackCalled.set(true);
}
}
);
clock.setTime(8);
timeToWaitMs=5000;
while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals(8,allocator.getLastHeartbeatTime());
Assert.assertTrue(callbackCalled.get());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies node blacklisting: with a failure threshold of 1, one task failure
 * each on h1 and h2 blacklists both nodes; those nodes then receive no
 * assignments and all three requests are eventually satisfied on h3.
 */
@Test public void testBlackListedNodes() throws Exception {
LOG.info("Running testBlackListedNodes");
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true);
// Blacklist a node after a single task failure on it.
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1);
// -1 disables the percentage-based "ignore blacklisting" escape hatch.
conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1);
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
MockNM nodeManager1=rm.registerNode("h1:1234",10240);
MockNM nodeManager2=rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",10240);
dispatcher.await();
// One data-local request per node.
ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h2"});
allocator.sendRequest(event2);
ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h3"});
allocator.sendRequest(event3);
// First schedule() only registers the asks with the RM; nothing is assigned yet.
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// One failure on h1 and one on h2: both nodes cross the threshold.
ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false);
allocator.sendFailure(f1);
ContainerFailedEvent f2=createFailEvent(jobId,1,"h2",false);
allocator.sendFailure(f2);
nodeManager1.nodeHeartbeat(true);
nodeManager2.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// The two failed-on nodes should be reported to the RM as blacklist additions.
assertBlacklistAdditionsAndRemovals(2,0,rm);
nodeManager1.nodeHeartbeat(false);
nodeManager2.nodeHeartbeat(false);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0,0,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// Only the non-blacklisted h3 may satisfy the requests.
nodeManager3.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0,0,rm);
Assert.assertTrue("No of assignments must be 3",assigned.size() == 3);
for ( TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertTrue("Assigned container host not correct","h3".equals(assig.getContainer().getNodeId().getHost()));
}
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies map-task node locality: two requests target h1 (capacity for two
 * containers) and one targets h2; with only h1 and h3 heartbeating, the h2
 * request is expected to land on h3 while the h1 requests stay node-local.
 */
@Test public void testMapNodeLocality() throws Exception {
LOG.info("Running testMapNodeLocality");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
// h1 can hold two 1024 MB containers; h3 only one; h2 never heartbeats.
MockNM nodeManager1=rm.registerNode("h1:1234",3072);
rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",1536);
dispatcher.await();
ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h1"});
allocator.sendRequest(event2);
ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h2"});
allocator.sendRequest(event3);
// First schedule() only registers the asks; nothing is assigned yet.
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
nodeManager3.nodeHeartbeat(true);
nodeManager1.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
// All three requests satisfied, locality not yet enforced.
checkAssignments(new ContainerRequestEvent[]{event1,event2,event3},assigned,false);
// The h2 request must have spilled over to h3; remove it (break immediately
// after the removal, so the iterator is not used again) and then verify the
// remaining two assignments are node-local on h1.
for ( TaskAttemptContainerAssignedEvent event : assigned) {
if (event.getTaskAttemptID().equals(event3.getAttemptID())) {
assigned.remove(event);
Assert.assertTrue(event.getContainer().getNodeId().getHost().equals("h3"));
break;
}
}
checkAssignments(new ContainerRequestEvent[]{event1,event2},assigned,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that when a reducer is preempted to ramp down reduces, the
 * TaskAttemptKillEvent delivered to it carries the
 * RMContainerAllocator.RAMPDOWN_DIAGNOSTIC message.
 */
@Test(timeout=30000) public void testReducerRampdownDiagnostics() throws Exception {
// Fixed typo: the log line previously said "tesReducerRampdownDiagnostics".
LOG.info("Running testReducerRampdownDiagnostics");
final Configuration conf=new Configuration();
// Slowstart 0: reducers may be scheduled before any map completes.
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,0.0f);
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
final DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
final RMApp app=rm.submitApp(1024);
dispatcher.await();
final String host="host1";
final MockNM nm=rm.registerNode(String.format("%s:1234",host),2048);
nm.nodeHeartbeat(true);
dispatcher.await();
final ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
final JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
final Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
final MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
dispatcher.await();
final String[] locations=new String[]{host};
// First request a reducer and loop until it has actually been assigned.
allocator.sendRequest(createReq(jobId,0,1024,locations,false,true));
for (int i=0; i < 1; ) {
dispatcher.await();
i+=allocator.schedule().size();
nm.nodeHeartbeat(true);
}
// Now request a map: the allocator must ramp down the reducer to make room.
allocator.sendRequest(createReq(jobId,0,1024,locations,true,false));
while (allocator.getTaskAttemptKillEvents().size() == 0) {
dispatcher.await();
allocator.schedule().size();
nm.nodeHeartbeat(true);
}
final String killEventMessage=allocator.getTaskAttemptKillEvents().get(0).getMessage();
Assert.assertTrue("No reducer rampDown preemption message",killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies node-update propagation: when nodes turn unhealthy the allocator
 * emits a JobUpdatedNodesEvent with the changed nodes and a
 * TaskAttemptKillEvent for the attempt running on the failed node, and emits
 * nothing further once the updates have been consumed.
 */
@Test public void testUpdatedNodes() throws Exception {
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
MockNM nm1=rm.registerNode("h1:1234",10240);
MockNM nm2=rm.registerNode("h2:1234",10240);
dispatcher.await();
// Request one container on h1 and wire the mock job so the allocator can map
// the attempt back to the node it runs on.
ContainerRequestEvent event=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event);
TaskAttemptId attemptId=event.getAttemptID();
TaskAttempt mockTaskAttempt=mock(TaskAttempt.class);
when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
Task mockTask=mock(Task.class);
when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
List assigned=allocator.schedule();
dispatcher.await();
nm1.nodeHeartbeat(true);
dispatcher.await();
// Initial node report: one event covering the three registered nodes.
Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(3,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
allocator.getJobUpdatedNodeEvents().clear();
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(1,assigned.size());
Assert.assertEquals(nm1.getNodeId(),assigned.get(0).getContainer().getNodeId());
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
// Both worker nodes go unhealthy: expect a node-update event for the two
// nodes and a kill for the attempt that was running on h1.
nm1.nodeHeartbeat(false);
nm2.nodeHeartbeat(false);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(0,assigned.size());
Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(1,allocator.getTaskAttemptKillEvents().size());
Assert.assertEquals(2,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
Assert.assertEquals(attemptId,allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
allocator.getJobUpdatedNodeEvents().clear();
allocator.getTaskAttemptKillEvents().clear();
// No further state change: no new events may be produced.
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(0,assigned.size());
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that the allocator unregisters from the RM on shutdown, and that
 * it only does so after having successfully registered.
 */
@Test public void testUnregistrationOnlyIfRegistered() throws Exception {
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp rmApp=rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
// MRApp wired to this test's RM via MyContainerAllocator and a drainable
// dispatcher, so events can be flushed deterministically.
MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,0,false,this.getClass().getName(),true,1){
@Override protected Dispatcher createDispatcher(){
return new DrainDispatcher();
}
// Added the missing @Override marker so the compiler verifies this really
// overrides MRApp's factory method.
@Override protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){
return new MyContainerAllocator(rm,appAttemptId,context);
}
}
;
mrApp.submit(conf);
DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher();
MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator();
amDispatcher.await();
// Registration must have happened before we stop the app...
Assert.assertTrue(allocator.isApplicationMasterRegistered());
mrApp.stop();
// ...and stopping must trigger the unregistration.
Assert.assertTrue(allocator.isUnregistered());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies work-preserving RM restart handling: after the RM restarts from a
 * MemoryRMStateStore, the allocator detects the RESYNC response and re-sends
 * its outstanding asks, releases and blacklist entries to the new RM, after
 * which scheduling proceeds normally.
 */
@Test public void testRMContainerAllocatorResendsRequestsOnRMRestart() throws Exception {
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RECOVERY_ENABLED,"true");
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,true);
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true);
// Blacklist after a single failure; disable the percentage escape hatch.
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1);
conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1);
// Shared state store so rm2 can recover rm1's state.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager rm1=new MyResourceManager(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm1,conf,appAttemptId,mockJob);
// Two requests plus one failure on h2 (which blacklists h2).
ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,2048,new String[]{"h1","h2"});
allocator.sendRequest(event2);
ContainerFailedEvent f1=createFailEvent(jobId,1,"h2",false);
allocator.sendFailure(f1);
List assignedContainers=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,0,rm1);
assertBlacklistAdditionsAndRemovals(1,0,rm1);
nm1.nodeHeartbeat(true);
dispatcher.await();
assignedContainers=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 2",2,assignedContainers.size());
assertAsksAndReleases(0,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
assignedContainers=allocator.schedule();
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
// Leave one pending ask and one pending release in flight before restart.
ContainerRequestEvent event3=createReq(jobId,3,1000,new String[]{"h1"});
allocator.sendRequest(event3);
ContainerAllocatorEvent deallocate1=createDeallocateEvent(jobId,1,false);
allocator.sendDeallocate(deallocate1);
assignedContainers=allocator.schedule();
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,1,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
// Phase 2: start rm2 from the same store and point the allocator at it.
MyResourceManager rm2=new MyResourceManager(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
allocator.updateSchedulerProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
// The old NM identity is told to resync with the restarted RM.
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
// Queue more work while the allocator has not yet resynced.
ContainerAllocatorEvent deallocate2=createDeallocateEvent(jobId,2,false);
allocator.sendDeallocate(deallocate2);
ContainerFailedEvent f2=createFailEvent(jobId,1,"h3",false);
allocator.sendFailure(f2);
ContainerRequestEvent event4=createReq(jobId,4,2000,new String[]{"h1","h2"});
allocator.sendRequest(event4);
allocator.schedule();
dispatcher.await();
// The first allocate against rm2 must come back as a RESYNC command.
Assert.assertTrue("Last allocate response is not RESYNC",allocator.isResyncCommand());
ContainerRequestEvent event5=createReq(jobId,5,3000,new String[]{"h1","h2","h3"});
allocator.sendRequest(event5);
// On resync the allocator re-sends all outstanding asks, releases and
// blacklisted nodes to the new RM.
assignedContainers=allocator.schedule();
dispatcher.await();
assertAsksAndReleases(3,2,rm2);
assertBlacklistAdditionsAndRemovals(2,0,rm2);
nm1.nodeHeartbeat(true);
dispatcher.await();
assignedContainers=allocator.schedule();
dispatcher.await();
Assert.assertEquals("Number of container should be 3",3,assignedContainers.size());
for ( TaskAttemptContainerAssignedEvent assig : assignedContainers) {
Assert.assertTrue("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost()));
}
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies mixed map/reduce scheduling: a 2048 MB map (doesn't fit on the
 * 1024 MB h1), a 3000 MB reduce and a second map are requested; the satisfied
 * assignments must avoid the undersized h1.
 */
@Test public void testMapReduceScheduling() throws Exception {
LOG.info("Running testMapReduceScheduling");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
// h1 deliberately too small (1024 MB) for any of the requests below.
MockNM nodeManager1=rm.registerNode("h1:1234",1024);
MockNM nodeManager2=rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",10240);
dispatcher.await();
// createReq flags: (earlierFailedAttempt, reduce).
ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1","h2"},true,false);
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,3000,new String[]{"h1"},false,true);
allocator.sendRequest(event2);
ContainerRequestEvent event3=createReq(jobId,3,2048,new String[]{"h3"},false,false);
allocator.sendRequest(event3);
// First schedule() only registers the asks; nothing is assigned yet.
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
nodeManager1.nodeHeartbeat(true);
nodeManager2.nodeHeartbeat(true);
nodeManager3.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
checkAssignments(new ContainerRequestEvent[]{event1,event3},assigned,false);
// Nothing may have been placed on the undersized h1.
for ( TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertFalse("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost()));
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that the MR AM webapp serves plain HTTP even when the YARN HTTP
 * policy is HTTPS_ONLY (the AM webapp does not honor the cluster-wide SSL
 * policy): http:// must work and https:// must fail with an SSLException.
 */
@Test public void testMRWebAppSSLDisabled() throws Exception {
MRApp app=new MRApp(2,2,true,this.getClass().getName(),true){
@Override protected ClientService createClientService( AppContext context){
return new MRClientService(context);
}
}
;
Configuration conf=new Configuration();
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,Policy.HTTPS_ONLY.name());
Job job=app.submit(conf);
String hostPort=NetUtils.getHostPortString(((MRClientService)app.getClientService()).getWebApp().getListenerAddress());
URL httpUrl=new URL("http://" + hostPort);
HttpURLConnection conn=(HttpURLConnection)httpUrl.openConnection();
InputStream in=conn.getInputStream();
ByteArrayOutputStream out=new ByteArrayOutputStream();
// Close the connection's input stream even if the copy fails (it was
// previously leaked).
try {
IOUtils.copyBytes(in,out,1024);
}
 finally {
in.close();
}
Assert.assertTrue(out.toString().contains("MapReduce Application"));
URL httpsUrl=new URL("https://" + hostPort);
try {
HttpURLConnection httpsConn=(HttpURLConnection)httpsUrl.openConnection();
httpsConn.getInputStream();
Assert.fail("https:// is not accessible, expected to fail");
}
catch ( Exception e) {
// Speaking TLS to a plain-HTTP listener must surface as an SSL error.
Assert.assertTrue(e instanceof SSLException);
}
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
BooleanVerifier
/**
 * Test rendering for ConfBlock: with no job id the block renders an apology
 * message; once a JOB_ID parameter is supplied it renders the job's
 * configuration keys and values.
 */
@Test public void testConfigurationBlock() throws Exception {
Configuration jobConf=new Configuration();
jobConf.set("Key for test","Value for test");
Job mockJob=mock(Job.class);
when(mockJob.getConfFile()).thenReturn(new Path("conf"));
when(mockJob.loadConfFile()).thenReturn(jobConf);
AppContext context=mock(AppContext.class);
when(context.getJob(any(JobId.class))).thenReturn(mockJob);
ConfBlockForTest confBlock=new ConfBlockForTest(context);
PrintWriter writer=new PrintWriter(data);
Block htmlBlock=new BlockForTest(new HtmlBlockForTest(),writer,0,false);
// No JOB_ID parameter yet: expect the error message.
confBlock.render(htmlBlock);
writer.flush();
assertTrue(data.toString().contains("Sorry, can't do anything without a JobID"));
// With a job id, the configuration contents should be rendered.
confBlock.addParameter(AMParams.JOB_ID,"job_01_01");
data.reset();
confBlock.render(htmlBlock);
writer.flush();
assertTrue(data.toString().contains("Key for test"));
assertTrue(data.toString().contains("Value for test"));
}
BooleanVerifier
/**
 * Test rendering for TasksBlock: a single succeeded map task should appear in
 * the rendered output with its id, progress, state and timestamps, and the
 * status text must be HTML/JS-escaped (literal "\n" instead of a newline).
 */
@Test public void testTasksBlock() throws Exception {
// Build a concrete TaskId (app 1, job 0, map task 0) for the mocked task.
ApplicationId appId=ApplicationIdPBImpl.newInstance(0,1);
JobId jobId=new JobIdPBImpl();
jobId.setId(0);
jobId.setAppId(appId);
TaskId taskId=new TaskIdPBImpl();
taskId.setId(0);
taskId.setTaskType(TaskType.MAP);
taskId.setJobId(jobId);
Task task=mock(Task.class);
when(task.getID()).thenReturn(taskId);
TaskReport report=mock(TaskReport.class);
when(report.getProgress()).thenReturn(0.7f);
when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
when(report.getStartTime()).thenReturn(100001L);
when(report.getFinishTime()).thenReturn(100011L);
// Contains a raw newline; the block is expected to escape it on render.
when(report.getStatus()).thenReturn("Dummy Status \n*");
when(task.getReport()).thenReturn(report);
when(task.getType()).thenReturn(TaskType.MAP);
Map tasks=new HashMap();
tasks.put(taskId,task);
AppContext ctx=mock(AppContext.class);
Job job=mock(Job.class);
when(job.getTasks()).thenReturn(tasks);
App app=new App(ctx);
app.setJob(job);
TasksBlockForTest taskBlock=new TasksBlockForTest(app);
// "m" selects map tasks.
taskBlock.addParameter(AMParams.TASK_TYPE,"m");
PrintWriter pWriter=new PrintWriter(data);
Block html=new BlockForTest(new HtmlBlockForTest(),pWriter,0,false);
taskBlock.render(html);
pWriter.flush();
assertTrue(data.toString().contains("task_0_0001_m_000000"));
assertTrue(data.toString().contains("70.00"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertTrue(data.toString().contains("100001"));
assertTrue(data.toString().contains("100011"));
// The raw newline must not survive; the escaped form must.
assertFalse(data.toString().contains("Dummy Status \n*"));
assertTrue(data.toString().contains("Dummy Status \\n*"));
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that CompletedTask reports the earliest attempt start time (10)
 * when its attempts started at 10 and 20.
 */
@Test(timeout=5000) public void testTaskStartTimes(){
TaskId taskId=mock(TaskId.class);
TaskInfo taskInfo=mock(TaskInfo.class);
Map taskAttempts=new TreeMap();
// First attempt starts at t=10. (Literals changed from lowercase 'l' to 'L'
// to avoid the classic 'l' vs '1' misreading.)
TaskAttemptID id=new TaskAttemptID("0",0,TaskType.MAP,0,0);
TaskAttemptInfo info=mock(TaskAttemptInfo.class);
when(info.getAttemptId()).thenReturn(id);
when(info.getStartTime()).thenReturn(10L);
taskAttempts.put(id,info);
// Second attempt starts later, at t=20.
id=new TaskAttemptID("1",0,TaskType.MAP,1,1);
info=mock(TaskAttemptInfo.class);
when(info.getAttemptId()).thenReturn(id);
when(info.getStartTime()).thenReturn(20L);
taskAttempts.put(id,info);
when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
CompletedTask task=new CompletedTask(taskId,taskInfo);
TaskReport report=task.getReport();
// The task-level start time must be the minimum over all attempts.
assertTrue(report.getStartTime() == 10);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises CompletedTaskAttempt accessors: rack name, phase, finished flag,
 * shuffle/sort finish times and shuffle port, all sourced from the backing
 * TaskAttemptInfo.
 */
@Test(timeout=5000) public void testCompletedTaskAttempt(){
// Build a concrete reduce attempt id first, then stub the info around it.
JobID jobId=new JobID("12345",0);
TaskID taskId=new TaskID(jobId,TaskType.REDUCE,0);
TaskAttemptID taskAttemptId=new TaskAttemptID(taskId,0);
TaskAttemptInfo info=mock(TaskAttemptInfo.class);
when(info.getAttemptId()).thenReturn(taskAttemptId);
when(info.getRackname()).thenReturn("Rackname");
when(info.getShuffleFinishTime()).thenReturn(11L);
when(info.getSortFinishTime()).thenReturn(12L);
when(info.getShufflePort()).thenReturn(10);
CompletedTaskAttempt attempt=new CompletedTaskAttempt(null,info);
// A completed attempt reports CLEANUP phase and is always finished.
assertEquals("Rackname",attempt.getNodeRackName());
assertEquals(Phase.CLEANUP,attempt.getPhase());
assertTrue(attempt.isFinished());
assertEquals(11L,attempt.getShuffleFinishTime());
assertEquals(12L,attempt.getSortFinishTime());
assertEquals(10,attempt.getShufflePort());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that history-dir creation blocks while HDFS is in safe mode and
 * succeeds once a background thread takes the namenode out of safe mode
 * within the timeout.
 */
@Test public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() throws Exception {
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
new Thread(){
@Override public void run(){
try {
Thread.sleep(500);
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
// Bug fix: after SAFEMODE_LEAVE the cluster must NOT be in safe mode;
// the original asserted assertTrue here, which contradicts the line above.
Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
}
catch ( Exception ex) {
Assert.fail(ex.toString());
}
}
}
.start();
// Must return successfully once the thread above leaves safe mode.
testCreateHistoryDirs(dfsCluster.getConfiguration(0),new SystemClock());
}
UtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * Verifies that history-dir creation gives up with a YarnRuntimeException
 * when HDFS stays in safe mode: a controlled clock is advanced past the
 * retry deadline by a background thread while the cluster never leaves
 * safe mode.
 */
@Test(expected=YarnRuntimeException.class) public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() throws Exception {
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
final ControlledClock clock=new ControlledClock(new SystemClock());
clock.setTime(1);
new Thread(){
@Override public void run(){
try {
Thread.sleep(500);
// Jump the clock forward so the retry loop sees its deadline expired.
clock.setTime(3000);
}
catch ( Exception ex) {
Assert.fail(ex.toString());
}
}
}
.start();
// Expected to throw YarnRuntimeException (declared on the @Test annotation).
testCreateHistoryDirs(dfsCluster.getConfiguration(0),clock);
}
InternalCallVerifier BooleanVerifier
/** History dirs must be creatable when HDFS is up and out of safe mode. */
@Test public void testCreateDirsWithFileSystem() throws Exception {
final FileSystem fs=dfsCluster.getFileSystem();
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
Assert.assertFalse(fs.isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0),true);
}
InternalCallVerifier BooleanVerifier
/** History-dir creation must report failure while HDFS sits in safe mode. */
@Test public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
final FileSystem fs=dfsCluster.getFileSystem();
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(fs.isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0),false);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that history dirs are created on the default filesystem named in
 * core-site (cluster 1) rather than on the cluster whose configuration is
 * passed in (cluster 2).
 */
@Test public void testCreateDirsWithAdditionalFileSystem() throws Exception {
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
dfsCluster2.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());
// Write a core-site.xml that points fs.defaultFS at cluster 1.
Configuration conf=new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString());
FileOutputStream os=new FileOutputStream(coreSitePath);
// Close the stream even if writeXml throws (it was previously leaked on error).
try {
conf.writeXml(os);
}
 finally {
os.close();
}
testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0),true);
// The dirs must exist on cluster 1 (the configured default FS) only.
Assert.assertTrue(dfsCluster.getFileSystem().exists(new Path(getDoneDirNameForTest())));
Assert.assertTrue(dfsCluster.getFileSystem().exists(new Path(getIntermediateDoneDirNameForTest())));
Assert.assertFalse(dfsCluster2.getFileSystem().exists(new Path(getDoneDirNameForTest())));
Assert.assertFalse(dfsCluster2.getFileSystem().exists(new Path(getIntermediateDoneDirNameForTest())));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies token-update crash recovery in the filesystem state store: a
 * rename of the "update" temp file is forced to fail mid-update, and on
 * reload the store must still present exactly one copy of the token.
 * NOTE(review): the final assert expects the NEW date, implying the
 * update's data write landed before the failed rename — confirm against
 * HistoryServerFileSystemStateStoreService's update sequence.
 */
@Test public void testUpdatedTokenRecovery() throws IOException {
IOException intentionalErr=new IOException("intentional error");
FileSystem fs=FileSystem.getLocal(conf);
final FileSystem spyfs=spy(fs);
// Matches only the temp paths used by token updates (name starts with "update").
ArgumentMatcher updateTmpMatcher=new ArgumentMatcher(){
@Override public boolean matches( Object argument){
if (argument instanceof Path) {
return ((Path)argument).getName().startsWith("update");
}
return false;
}
}
;
// Fail the rename step of the update, simulating a crash mid-update.
doThrow(intentionalErr).when(spyfs).rename(argThat(updateTmpMatcher),isA(Path.class));
conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,testDir.getAbsoluteFile().toURI().toString());
HistoryServerStateStoreService store=new HistoryServerFileSystemStateStoreService(){
@Override FileSystem createFileSystem() throws IOException {
return spyfs;
}
}
;
store.init(conf);
store.start();
final MRDelegationTokenIdentifier token1=new MRDelegationTokenIdentifier(new Text("tokenOwner1"),new Text("tokenRenewer1"),new Text("tokenUser1"));
token1.setSequenceNumber(1);
final Long tokenDate1=1L;
store.storeToken(token1,tokenDate1);
final Long newTokenDate1=975318642L;
// The injected rename failure must propagate out of updateToken unchanged.
try {
store.updateToken(token1,newTokenDate1);
fail("intentional error not thrown");
}
catch ( IOException e) {
assertEquals(intentionalErr,e);
}
store.close();
// Reload from disk with a fresh (non-failing) store and check the outcome.
store=createAndStartStore();
HistoryServerState state=store.loadState();
assertEquals("incorrect loaded token count",1,state.tokenState.size());
assertTrue("missing token 1",state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date",newTokenDate1,state.tokenState.get(token1));
store.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies JHS delegation-token secret-manager recovery: keys and tokens
 * survive a stop/recover cycle, sequence numbers resume from the recovered
 * maximum, and a cancelled token does not reappear after a second recovery.
 */
@Test public void testRecovery() throws IOException {
Configuration conf=new Configuration();
HistoryServerStateStoreService store=new HistoryServerMemStateStoreService();
store.init(conf);
store.start();
JHSDelegationTokenSecretManagerForTest mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.startThreads();
// Create two tokens, then snapshot keys and renew dates before stopping.
MRDelegationTokenIdentifier tokenId1=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token1=new Token(tokenId1,mgr);
MRDelegationTokenIdentifier tokenId2=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token2=new Token(tokenId2,mgr);
DelegationKey[] keys=mgr.getAllKeys();
long tokenRenewDate1=mgr.getAllTokens().get(tokenId1).getRenewDate();
long tokenRenewDate2=mgr.getAllTokens().get(tokenId2).getRenewDate();
mgr.stopThreads();
// First recovery: everything must come back intact.
mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
List recoveredKeys=Arrays.asList(mgr.getAllKeys());
for ( DelegationKey key : keys) {
assertTrue("key missing after recovery",recoveredKeys.contains(key));
}
assertTrue("token1 missing",mgr.getAllTokens().containsKey(tokenId1));
assertEquals("token1 renew date",tokenRenewDate1,mgr.getAllTokens().get(tokenId1).getRenewDate());
assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate());
mgr.startThreads();
// Recovered tokens must still verify against their original passwords.
mgr.verifyToken(tokenId1,token1.getPassword());
mgr.verifyToken(tokenId2,token2.getPassword());
// Sequence numbers must continue from the recovered maximum.
MRDelegationTokenIdentifier tokenId3=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token3=new Token(tokenId3,mgr);
assertEquals("sequence number restore",tokenId2.getSequenceNumber() + 1,tokenId3.getSequenceNumber());
mgr.cancelToken(token1,"tokenOwner");
// Cancelling a Kerberos-principal-owned token by the short name must be
// rejected; only the full owner name may cancel.
MRDelegationTokenIdentifier tokenIdFull=new MRDelegationTokenIdentifier(new Text("tokenOwner/localhost@LOCALHOST"),new Text("tokenRenewer"),new Text("tokenUser"));
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
Token tokenFull=new Token(tokenIdFull,mgr);
try {
mgr.cancelToken(tokenFull,"tokenOwner");
}
catch ( AccessControlException ace) {
assertTrue(ace.getMessage().contains("is not authorized to cancel the token"));
}
// NOTE(review): no fail() after cancelToken above — if the short-name cancel
// unexpectedly succeeds this test would not notice; confirm intent.
mgr.cancelToken(tokenFull,tokenIdFull.getOwner().toString());
long tokenRenewDate3=mgr.getAllTokens().get(tokenId3).getRenewDate();
mgr.stopThreads();
// Second recovery: token1 was cancelled and must stay gone; 2 and 3 remain.
mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
assertFalse("token1 should be missing",mgr.getAllTokens().containsKey(tokenId1));
assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate());
assertTrue("token3 missing",mgr.getAllTokens().containsKey(tokenId3));
assertEquals("token3 renew date",tokenRenewDate3,mgr.getAllTokens().get(tokenId3).getRenewDate());
mgr.startThreads();
mgr.verifyToken(tokenId2,token2.getPassword());
mgr.verifyToken(tokenId3,token3.getPassword());
mgr.stopThreads();
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies JobHistory.refreshLoadedJobCache(): with a loaded-job cache
 * size of 2, loading three jobs evicts the first; after the refresh picks
 * up a new size of 3, all three jobs fit in the cache.
 */
@Test public void testRefreshLoadedJobCache() throws Exception {
HistoryFileManager historyManager=mock(HistoryFileManager.class);
jobHistory=spy(new JobHistory());
// Inject the mocked file manager before init() constructs the storage.
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
Configuration conf=new Configuration();
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,"2");
jobHistory.init(conf);
jobHistory.start();
CachedHistoryStorage storage=spy((CachedHistoryStorage)jobHistory.getHistoryStorage());
Job[] jobs=new Job[3];
JobId[] jobIds=new JobId[3];
for (int i=0; i < 3; i++) {
jobs[i]=mock(Job.class);
jobIds[i]=mock(JobId.class);
when(jobs[i].getID()).thenReturn(jobIds[i]);
}
// Every lookup resolves to the same HistoryFileInfo; loadJob() hands out
// the three mock jobs in order.
HistoryFileInfo fileInfo=mock(HistoryFileInfo.class);
when(historyManager.getFileInfo(any(JobId.class))).thenReturn(fileInfo);
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1]).thenReturn(jobs[2]);
for (int i=0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
// Cache size 2: loading the third job must have evicted the first.
Map jobCache=storage.getLoadedJobCache();
assertFalse(jobCache.containsKey(jobs[0].getID()));
assertTrue(jobCache.containsKey(jobs[1].getID()) && jobCache.containsKey(jobs[2].getID()));
// Bump the configured size; the spy's createConf() returns this conf so
// refreshLoadedJobCache() observes the new value.
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,"3");
doReturn(conf).when(storage).createConf();
// Re-stub loadJob() so the three jobs can be served again after refresh.
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1]).thenReturn(jobs[2]);
jobHistory.refreshLoadedJobCache();
for (int i=0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
// With size 3 no entry is evicted.
jobCache=storage.getLoadedJobCache();
for (int i=0; i < 3; i++) {
assertTrue(jobCache.containsKey(jobs[i].getID()));
}
}
BooleanVerifier
/**
 * refreshLoadedJobCache() is only meaningful for a caching storage; with
 * a plain HistoryStorage implementation it must throw
 * UnsupportedOperationException.
 */
@Test public void testRefreshLoadedJobCacheUnSupportedOperation(){
jobHistory=spy(new JobHistory());
// A bare HistoryStorage stub (not a CachedHistoryStorage).
HistoryStorage storage=new HistoryStorage(){
@Override public void setHistoryFileManager( HistoryFileManager hsManager){
}
@Override public JobsInfo getPartialJobs( Long offset, Long count, String user, String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd, JobState jobState){
return null;
}
@Override public Job getFullJob( JobId jobId){
return null;
}
@Override public Map getAllPartialJobs(){
return null;
}
};
doReturn(storage).when(jobHistory).createHistoryStorage();
jobHistory.init(new Configuration());
jobHistory.start();
Throwable th=null;
try {
jobHistory.refreshLoadedJobCache();
}
catch ( Exception e) {
th=e;
}
// Include what was actually thrown (or null) in the failure message.
assertTrue("Expected UnsupportedOperationException but got: " + th,th instanceof UnsupportedOperationException);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises CompletedJob read accessors: completion-event paging for map
 * attempts and for all attempts, monotonically non-decreasing event ids,
 * and job metadata (name, queue, progress, diagnostics, ACLs).
 * @throws Exception
 */
@Test(timeout=30000) public void testGetTaskAttemptCompletionEvent() throws Exception {
HistoryFileInfo info=mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
TaskCompletionEvent[] events=completedJob.getMapAttemptCompletionEvents(0,1000);
// Paging: a window of 10 map-attempt events returns exactly 10.
assertEquals(10,completedJob.getMapAttemptCompletionEvents(0,10).length);
int currentEventId=0;
// Event ids must be non-decreasing across the returned events.
for ( TaskCompletionEvent taskAttemptCompletionEvent : events) {
int eventId=taskAttemptCompletionEvent.getEventId();
assertTrue(eventId >= currentEventId);
currentEventId=eventId;
}
// NOTE(review): loadConfFile() is asserted null even though getConfFile()
// is stubbed -- confirm this is the intended contract here.
assertNull(completedJob.loadConfFile());
assertEquals("Sleep job",completedJob.getName());
assertEquals("default",completedJob.getQueueName());
assertEquals(1.0,completedJob.getProgress(),0.001);
// All-attempt events: 12 total; windows of 10 from offsets 0 and 5.
assertEquals(12,completedJob.getTaskAttemptCompletionEvents(0,1000).length);
assertEquals(10,completedJob.getTaskAttemptCompletionEvents(0,10).length);
assertEquals(7,completedJob.getTaskAttemptCompletionEvents(5,10).length);
assertEquals(1,completedJob.getDiagnostics().size());
assertEquals("",completedJob.getDiagnostics().get(0));
assertEquals(0,completedJob.getJobACLs().size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a 2-map/1-reduce job through MRAppWithHistory, replays it via a
 * fresh JobHistory, and verifies the parsed job: completed map/reduce
 * counts, user name, task breakdown by type, and final SUCCEEDED state.
 */
@Test public void testHistoryEvents() throws Exception {
Configuration conf=new Configuration();
MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
HistoryContext context=new JobHistory();
((JobHistory)context).init(conf);
((JobHistory)context).start();
Assert.assertTrue(context.getStartTime() > 0);
// Expected value first: JUnit's assertEquals(expected, actual) contract.
Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
Job parsedJob=context.getJob(jobId);
((JobHistory)context).stop();
Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
Assert.assertEquals("CompletedMaps not correct",2,parsedJob.getCompletedMaps());
Assert.assertEquals(System.getProperty("user.name"),parsedJob.getUserName());
Map tasks=parsedJob.getTasks();
Assert.assertEquals("No of tasks not correct",3,tasks.size());
for ( Task task : tasks.values()) {
verifyTask(task);
}
Map maps=parsedJob.getTasks(TaskType.MAP);
Assert.assertEquals("No of maps not correct",2,maps.size());
Map reduces=parsedJob.getTasks(TaskType.REDUCE);
Assert.assertEquals("No of reduces not correct",1,reduces.size());
Assert.assertEquals("CompletedReduce not correct",1,parsedJob.getCompletedReduces());
Assert.assertEquals("Job state not correct",JobState.SUCCEEDED,parsedJob.getState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Submits a job to the queue "assignedQueue" and verifies that the queue
 * name survives the round trip through the job-history files.
 */
@Test public void testAssignedQueue() throws Exception {
Configuration conf=new Configuration();
MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true,"assignedQueue");
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
HistoryContext context=new JobHistory();
((JobHistory)context).init(conf);
((JobHistory)context).start();
Assert.assertTrue(context.getStartTime() > 0);
// Expected value first: JUnit's assertEquals(expected, actual) contract.
Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
Job parsedJob=context.getJob(jobId);
((JobHistory)context).stop();
Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
Assert.assertEquals("QueueName not correct","assignedQueue",parsedJob.getQueueName());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Kills a 2-map/1-reduce job and checks the history record of the kill:
 * the history file parses without error, every original diagnostic string
 * appears in the history's error info, and the "job killed" message is
 * present.
 */
@Test(timeout=60000) public void testDiagnosticsForKilledJob() throws Exception {
LOG.info("STARTING testDiagnosticsForKilledJob");
try {
final Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app=new MRAppWithHistoryWithJobKilled(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
app.waitForState(job,JobState.KILLED);
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory=new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
// Serialize on the fileInfo while the history file is opened and parsed.
synchronized (fileInfo) {
Path historyFilePath=fileInfo.getHistoryFile();
FSDataInputStream in=null;
FileContext fc=null;
try {
fc=FileContext.getFileContext(conf);
in=fc.open(fc.makeQualified(historyFilePath));
}
catch ( IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath,ioe);
throw (new Exception("Can not open History File"));
}
parser=new JobHistoryParser(in);
jobInfo=parser.parse();
}
Exception parseException=parser.getParseException();
assertNull("Caught an expected exception " + parseException,parseException);
final List originalDiagnostics=job.getDiagnostics();
final String historyError=jobInfo.getErrorInfo();
assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty());
assertNotNull("No history error info for a failed job ",historyError);
// Every in-memory diagnostic must survive into the history error info.
for ( String diagString : originalDiagnostics) {
assertTrue(historyError.contains(diagString));
}
assertTrue("No killed message in diagnostics",historyError.contains(JobImpl.JOB_KILLED_DIAG));
}
finally {
LOG.info("FINISHED testDiagnosticsForKilledJob");
}
}
InternalCallVerifier BooleanVerifier
/**
 * Feeds a scripted event stream (two task starts, two task failures, then
 * a JOB_FAILED completion naming task 0) into JobHistoryParser and checks
 * that the implicated task id shows up in the parsed error info.
 */
@Test public void testMultipleFailedTasks() throws Exception {
JobHistoryParser parser=new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
EventReader reader=Mockito.mock(EventReader.class);
final AtomicInteger numEventsRead=new AtomicInteger(0);
final org.apache.hadoop.mapreduce.TaskType taskType=org.apache.hadoop.mapreduce.TaskType.MAP;
final TaskID[] tids=new TaskID[2];
final JobID jid=new JobID("1",1);
tids[0]=new TaskID(jid,taskType,0);
tids[1]=new TaskID(jid,taskType,1);
// Each getNextEvent() call yields the next scripted event; null ends the
// stream after event index 4.
Mockito.when(reader.getNextEvent()).thenAnswer(new Answer(){
public HistoryEvent answer( InvocationOnMock invocation) throws IOException {
int eventId=numEventsRead.getAndIncrement();
// Alternate between the two task ids (even -> tids[0], odd -> tids[1]).
TaskID tid=tids[eventId & 0x1];
if (eventId < 2) {
return new TaskStartedEvent(tid,0,taskType,"");
}
if (eventId < 4) {
TaskFailedEvent tfe=new TaskFailedEvent(tid,0,taskType,"failed","FAILED",null,new Counters());
// NOTE(review): datum round-trip looks intentional (exercises the
// setDatum/getDatum path) -- confirm.
tfe.setDatum(tfe.getDatum());
return tfe;
}
if (eventId < 5) {
JobUnsuccessfulCompletionEvent juce=new JobUnsuccessfulCompletionEvent(jid,100L,2,0,"JOB_FAILED",Collections.singletonList("Task failed: " + tids[0].toString()));
return juce;
}
return null;
}
}
);
JobInfo info=parser.parse(reader);
assertTrue("Task 0 not implicated",info.getErrorInfo().contains(tids[0].toString()));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * PartialJob sanity checks: progress reports as fully complete, access is
 * granted to the current user, and every detail accessor a partial job
 * cannot serve returns null.
 */
@Test(timeout=1000) public void testPartialJob() throws Exception {
JobId jobId=new JobIdPBImpl();
jobId.setId(0);
JobIndexInfo indexInfo=new JobIndexInfo(0L,System.currentTimeMillis(),"user","jobName",jobId,3,2,"JobStatus");
PartialJob partialJob=new PartialJob(indexInfo,jobId);
// A partial job always reports complete progress.
Assert.assertEquals(1.0f,partialJob.getProgress(),0.001f);
// Detail views are unavailable on a partial job and come back null.
assertNull(partialJob.getAllCounters());
assertNull(partialJob.getTasks());
assertNull(partialJob.getTasks(TaskType.MAP));
assertNull(partialJob.getTask(new TaskIdPBImpl()));
assertNull(partialJob.getTaskAttemptCompletionEvents(0,100));
assertNull(partialJob.getMapAttemptCompletionEvents(0,100));
assertTrue(partialJob.checkAccess(UserGroupInformation.getCurrentUser(),null));
assertNull(partialJob.getAMInfos());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Fails a task in a 2-map/1-reduce job and verifies the history parse:
 * no parse exception, every completed task's report carries non-null
 * counters, and the original diagnostics appear in the history error info.
 */
@Test(timeout=60000) public void testCountersForFailedTask() throws Exception {
LOG.info("STARTING testCountersForFailedTask");
try {
Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app=new MRAppWithHistoryWithFailedTask(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
app.waitForState(job,JobState.FAILED);
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory=new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
// Serialize on the fileInfo while the history file is opened and parsed.
synchronized (fileInfo) {
Path historyFilePath=fileInfo.getHistoryFile();
FSDataInputStream in=null;
FileContext fc=null;
try {
fc=FileContext.getFileContext(conf);
in=fc.open(fc.makeQualified(historyFilePath));
}
catch ( IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath,ioe);
throw (new Exception("Can not open History File"));
}
parser=new JobHistoryParser(in);
jobInfo=parser.parse();
}
Exception parseException=parser.getParseException();
Assert.assertNull("Caught an expected exception " + parseException,parseException);
// Counters must be present for every task, including the failed one.
for ( Map.Entry entry : jobInfo.getAllTasks().entrySet()) {
TaskId yarnTaskID=TypeConverter.toYarn(entry.getKey());
CompletedTask ct=new CompletedTask(yarnTaskID,entry.getValue());
Assert.assertNotNull("completed task report has null counters",ct.getReport().getCounters());
}
final List originalDiagnostics=job.getDiagnostics();
final String historyError=jobInfo.getErrorInfo();
assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty());
assertNotNull("No history error info for a failed job ",historyError);
// Every in-memory diagnostic must survive into the history error info.
for ( String diagString : originalDiagnostics) {
assertTrue(historyError.contains(diagString));
}
}
finally {
LOG.info("FINISHED testCountersForFailedTask");
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * After a job's entry is removed from the job-list cache and its history
 * file has been moved to the done directory, getFileInfo() must still
 * locate it by scanning the old (done) dirs. Also verifies the
 * move-to-done executor terminates on stop().
 */
@Test(timeout=50000) public void testScanningOldDirs() throws Exception {
LOG.info("STARTING testScanningOldDirs");
try {
Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
HistoryFileManagerForTest hfm=new HistoryFileManagerForTest();
hfm.init(conf);
HistoryFileInfo fileInfo=hfm.getFileInfo(jobId);
Assert.assertNotNull("Unable to locate job history",fileInfo);
// Evict from the cache so the next lookup must scan the done dirs.
hfm.deleteJobFromJobListCache(fileInfo);
// Poll (up to 10s in 10ms steps) for the asynchronous move to finish.
final int msecPerSleep=10;
int msecToSleep=10 * 1000;
while (fileInfo.isMovePending() && msecToSleep > 0) {
Assert.assertTrue(!fileInfo.didMoveFail());
msecToSleep-=msecPerSleep;
Thread.sleep(msecPerSleep);
}
Assert.assertTrue("Timeout waiting for history move",msecToSleep > 0);
fileInfo=hfm.getFileInfo(jobId);
hfm.stop();
Assert.assertNotNull("Unable to locate old job history",fileInfo);
Assert.assertTrue("HistoryFileManager not shutdown properly",hfm.moveToDoneExecutor.isTerminated());
}
finally {
LOG.info("FINISHED testScanningOldDirs");
}
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test cleaning of old history files. Files should be deleted after 1 week
 * by default, so a just-finished job survives clean(); forcing the max
 * history age to -1 makes every file stale and clean() must delete it.
 * Also verifies the move-to-done executor terminates on stop().
 */
@Test(timeout=15000) public void testDeleteFileInfo() throws Exception {
LOG.info("STARTING testDeleteFileInfo");
try {
Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
HistoryFileManager hfm=new HistoryFileManager();
hfm.init(conf);
HistoryFileInfo fileInfo=hfm.getFileInfo(jobId);
hfm.initExisting();
// Wait for the asynchronous move-to-done to complete.
while (fileInfo.isMovePending()) {
Thread.sleep(300);
}
Assert.assertNotNull(hfm.jobListCache.values());
// Default retention: the fresh job must survive the first clean().
hfm.clean();
Assert.assertFalse(fileInfo.isDeleted());
// A negative max age makes every history file expired.
hfm.setMaxHistoryAge(-1);
hfm.clean();
hfm.stop();
Assert.assertTrue("Thread pool shutdown",hfm.moveToDoneExecutor.isTerminated());
Assert.assertTrue("file should be deleted ",fileInfo.isDeleted());
}
finally {
LOG.info("FINISHED testDeleteFileInfo");
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end history-server report checks: runs a 1-map/1-reduce job,
 * starts a JobHistoryServer over its history, and queries the MR client
 * protocol for task-attempt reports, task reports, completion events and
 * diagnostics.
 */
@Test(timeout=50000) public void testReports() throws Exception {
Configuration config=new Configuration();
config.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(config);
MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
app.submit(config);
Job job=app.getContext().getAllJobs().values().iterator().next();
app.waitForState(job,JobState.SUCCEEDED);
historyServer=new JobHistoryServer();
historyServer.init(config);
historyServer.start();
// Locate the JobHistory service among the server's child services.
JobHistory jobHistory=null;
for ( Service service : historyServer.getServices()) {
if (service instanceof JobHistory) {
jobHistory=(JobHistory)service;
}
}
// Guard against an NPE below with a meaningful failure instead.
assertNotNull("JobHistory service not found",jobHistory);
Map jobs=jobHistory.getAllJobs();
assertEquals(1,jobs.size());
assertEquals("job_0_0000",jobs.keySet().iterator().next().toString());
Task task=job.getTasks().values().iterator().next();
TaskAttempt attempt=task.getAttempts().values().iterator().next();
HistoryClientService historyService=historyServer.getClientService();
MRClientProtocol protocol=historyService.getClientHandler();
// Task-attempt report: container id, empty diagnostics, counters present.
GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
TaskAttemptId taId=attempt.getID();
taId.setTaskId(task.getID());
taId.getTaskId().setJobId(job.getID());
gtarRequest.setTaskAttemptId(taId);
GetTaskAttemptReportResponse response=protocol.getTaskAttemptReport(gtarRequest);
assertEquals("container_0_0000_01_000000",response.getTaskAttemptReport().getContainerId().toString());
assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
assertNotNull(response.getTaskAttemptReport().getCounters().getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
assertEquals(taId.toString(),response.getTaskAttemptReport().getTaskAttemptId().toString());
// Task report: diagnostics, progress, id and final state.
GetTaskReportRequest request=recordFactory.newRecordInstance(GetTaskReportRequest.class);
TaskId taskId=task.getID();
taskId.setJobId(job.getID());
request.setTaskId(taskId);
GetTaskReportResponse reportResponse=protocol.getTaskReport(request);
assertEquals("",reportResponse.getTaskReport().getDiagnosticsList().iterator().next());
assertEquals(1.0f,reportResponse.getTaskReport().getProgress(),0.01);
assertEquals(taskId.toString(),reportResponse.getTaskReport().getTaskId().toString());
assertEquals(TaskState.SUCCEEDED,reportResponse.getTaskReport().getTaskState());
// Completion events and per-attempt diagnostics.
GetTaskAttemptCompletionEventsRequest taskAttemptRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
taskAttemptRequest.setJobId(job.getID());
GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse=protocol.getTaskAttemptCompletionEvents(taskAttemptRequest);
assertEquals(0,taskAttemptCompletionEventsResponse.getCompletionEventCount());
GetDiagnosticsRequest diagnosticRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
diagnosticRequest.setTaskAttemptId(taId);
GetDiagnosticsResponse diagnosticResponse=protocol.getDiagnostics(diagnosticRequest);
assertEquals(1,diagnosticResponse.getDiagnosticsCount());
assertEquals("",diagnosticResponse.getDiagnostics(0));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies basic {@link JobIdHistoryFileInfoMap} behaviour with a single
 * entry: putIfAbsent semantics, get(), size, navigableKeySet() and
 * values().
 */
@Test(timeout=2000) public void testWithSingleElement() throws InterruptedException {
JobIdHistoryFileInfoMap mapWithSize=new JobIdHistoryFileInfoMap();
JobId jobId=MRBuilderUtils.newJobId(1,1,1);
HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);
// First insert: no previous mapping, so putIfAbsent() returns null.
assertNull("Incorrect return on putIfAbsent()",mapWithSize.putIfAbsent(jobId,fileInfo1));
// Second insert: the key is present, so the existing value is returned.
assertEquals("Incorrect return on putIfAbsent()",fileInfo1,mapWithSize.putIfAbsent(jobId,fileInfo1));
assertEquals("Incorrect get()",fileInfo1,mapWithSize.get(jobId));
assertTrue("Incorrect size()",checkSize(mapWithSize,1));
NavigableSet set=mapWithSize.navigableKeySet();
assertEquals("Incorrect navigableKeySet()",1,set.size());
assertTrue("Incorrect navigableKeySet()",set.contains(jobId));
Collection values=mapWithSize.values();
assertEquals("Incorrect values()",1,values.size());
assertTrue("Incorrect values()",values.contains(fileInfo1));
}
IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * JobListCache capacity test: with maxSize=2, adding a third entry must
 * eventually evict the first. Eviction is asynchronous, so values() is
 * polled until the size drops back to the cap.
 * NOTE(review): worst-case polling (9 x 100ms) nearly exhausts the 1000ms
 * test timeout -- confirm this is not flaky on slow machines.
 */
@Test(timeout=1000) public void testEviction() throws InterruptedException {
int maxSize=2;
JobListCache cache=new JobListCache(maxSize,1000);
JobId jobId1=MRBuilderUtils.newJobId(1,1,1);
HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);
JobId jobId2=MRBuilderUtils.newJobId(2,2,2);
HistoryFileInfo fileInfo2=Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);
JobId jobId3=MRBuilderUtils.newJobId(3,3,3);
HistoryFileInfo fileInfo3=Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);
cache.addIfAbsent(fileInfo1);
cache.addIfAbsent(fileInfo2);
cache.addIfAbsent(fileInfo3);
Collection values;
for (int i=0; i < 9; i++) {
values=cache.values();
if (values.size() > maxSize) {
Thread.sleep(100);
}
else {
// Size is back at the cap: the first-added entry must be the one gone.
assertFalse("fileInfo1 should have been evicted",values.contains(fileInfo1));
return;
}
}
fail("JobListCache didn't delete the extra entry");
}
BooleanVerifier NullVerifier HybridVerifier
/**
 * Proxy-user refresh: authorization of a proxied user keeps failing until
 * the superuser's proxy groups cover the user's group AND the updated
 * configuration is pushed via -refreshSuperUserGroupsConfiguration.
 */
@Test public void testRefreshSuperUserGroups() throws Exception {
UserGroupInformation ugi=mock(UserGroupInformation.class);
UserGroupInformation superUser=mock(UserGroupInformation.class);
// "regularUser" (member of group3) is proxied by "superuser".
when(ugi.getRealUser()).thenReturn(superUser);
when(superUser.getShortUserName()).thenReturn("superuser");
when(superUser.getUserName()).thenReturn("superuser");
when(ugi.getGroupNames()).thenReturn(new String[]{"group3"});
when(ugi.getUserName()).thenReturn("regularUser");
// Proxy config allows only group1/group2 -- group3 is not covered yet.
conf.set("hadoop.proxyuser.superuser.groups","group1,group2");
conf.set("hadoop.proxyuser.superuser.hosts","127.0.0.1");
String[] args=new String[1];
args[0]="-refreshSuperUserGroupsConfiguration";
hsAdminClient.run(args);
Throwable th=null;
try {
ProxyUsers.authorize(ugi,"127.0.0.1");
}
catch ( Exception e) {
th=e;
}
// group3 is not allowed -> authorization must be rejected.
assertTrue(th instanceof AuthorizationException);
// Add group3 in the conf but do NOT refresh yet: still rejected.
conf.set("hadoop.proxyuser.superuser.groups","group1,group2,group3");
th=null;
try {
ProxyUsers.authorize(ugi,"127.0.0.1");
}
catch ( Exception e) {
th=e;
}
assertTrue(th instanceof AuthorizationException);
// After the refresh command the new proxy groups take effect.
hsAdminClient.run(args);
th=null;
try {
ProxyUsers.authorize(ugi,"127.0.0.1");
}
catch ( Exception e) {
th=e;
}
assertNull("Unexpected exception thrown: " + th,th);
}
BooleanVerifier
/**
 * Admin-ACL refresh: while the current user is in the admin ACL both
 * refresh commands succeed; after the ACL is switched to another user,
 * a privileged refresh must be rejected with a RemoteException.
 */
@Test public void testRefreshAdminAcls() throws Exception {
// Grant admin to the current user and verify both refreshes pass.
conf.set(JHAdminConfig.JHS_ADMIN_ACL,UserGroupInformation.getCurrentUser().getUserName());
String[] args=new String[1];
args[0]="-refreshAdminAcls";
hsAdminClient.run(args);
args[0]="-refreshSuperUserGroupsConfiguration";
hsAdminClient.run(args);
// Revoke admin (ACL now names a different user) and reload the ACL.
conf.set(JHAdminConfig.JHS_ADMIN_ACL,"notCurrentUser");
args[0]="-refreshAdminAcls";
hsAdminClient.run(args);
Throwable th=null;
args[0]="-refreshSuperUserGroupsConfiguration";
try {
hsAdminClient.run(args);
}
catch ( Exception e) {
th=e;
}
// Include what was actually thrown (or null) in the failure message.
assertTrue("Expected RemoteException but got: " + th,th instanceof RemoteException);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "-refreshUserToGroupsMappings" forces the Groups service
 * to re-resolve membership: lookups before the refresh return identical
 * groups; lookups after it return different ones (the test group-mapping
 * implementation changes its answers on refresh).
 */
@Test public void testRefreshUserToGroupsMappings() throws Exception {
String[] args=new String[]{"-refreshUserToGroupsMappings"};
Groups groups=Groups.getUserToGroupsMappingService(conf);
String user=UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
List g1=groups.getGroups(user);
String[] str_groups=(String[])g1.toArray(new String[0]);
System.out.println(Arrays.toString(str_groups));
System.out.println("second attempt, should be same:");
List g2=groups.getGroups(user);
// Reassign: List.toArray(T[]) allocates and returns a NEW array when the
// argument is too small, so discarding the result would print stale data.
str_groups=(String[])g2.toArray(new String[0]);
System.out.println(Arrays.toString(str_groups));
for (int i=0; i < g2.size(); i++) {
assertEquals("Should be same group ",g1.get(i),g2.get(i));
}
hsAdminClient.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List g3=groups.getGroups(user);
str_groups=(String[])g3.toArray(new String[0]);
System.out.println(Arrays.toString(str_groups));
// NOTE(review): indexes g1 by g3's size -- assumes the refreshed mapping
// has no more groups than the original; confirm.
for (int i=0; i < g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
}
}
BooleanVerifier
/**
 * HsJobsBlock rendering: given a context holding one finished job, the
 * rendered output must contain the job's name, user, queue and final
 * SUCCEEDED state.
 */
@Test public void testHsJobsBlock(){
AppContext appContext=mock(AppContext.class);
Job job=getJob();
Map jobMap=new HashMap();
jobMap.put(job.getID(),job);
when(appContext.getAllJobs()).thenReturn(jobMap);
HsJobsBlock jobsBlock=new HsJobsBlockForTest(appContext);
PrintWriter writer=new PrintWriter(data);
Block htmlBlock=new BlockForTest(new HtmlBlockForTest(),writer,0,false);
jobsBlock.render(htmlBlock);
writer.flush();
String rendered=data.toString();
assertTrue(rendered.contains("JobName"));
assertTrue(rendered.contains("UserName"));
assertTrue(rendered.contains("QueueName"));
assertTrue(rendered.contains("SUCCEEDED"));
}
BooleanVerifier
/**
 * HsTasksBlock rendering for a reduce task: the output must contain the
 * task id, the SUCCEEDED state and the stubbed timing values.
 */
@Test public void testHsTasksBlock(){
Task task=getTask(0);
Map tasks=new HashMap();
tasks.put(task.getID(),task);
AppContext ctx=mock(AppContext.class);
AppForTest app=new AppForTest(ctx);
Job job=mock(Job.class);
when(job.getTasks()).thenReturn(tasks);
app.setJob(job);
HsTasksBlockForTest block=new HsTasksBlockForTest(app);
// "r" selects the reduce-task view.
block.addParameter(AMParams.TASK_TYPE,"r");
PrintWriter pWriter=new PrintWriter(data);
Block html=new BlockForTest(new HtmlBlockForTest(),pWriter,0,false);
block.render(html);
pWriter.flush();
assertTrue(data.toString().contains("task_0_0001_r_000000"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertTrue(data.toString().contains("100001"));
assertTrue(data.toString().contains("100011"));
// NOTE(review): contains("") is vacuously true -- the expected literal was
// probably lost (e.g. stripped markup); restore the real expected value.
assertTrue(data.toString().contains(""));
}
BooleanVerifier
/**
* test AttemptsBlock's rendering.
*/
@Test public void testAttemptsBlock(){
AppContext ctx=mock(AppContext.class);
AppForTest app=new AppForTest(ctx);
Task task=getTask(0);
Map attempts=new HashMap();
TaskAttempt attempt=mock(TaskAttempt.class);
TaskAttemptId taId=new TaskAttemptIdPBImpl();
taId.setId(0);
taId.setTaskId(task.getID());
when(attempt.getID()).thenReturn(taId);
when(attempt.getNodeHttpAddress()).thenReturn("Node address");
ApplicationId appId=ApplicationIdPBImpl.newInstance(0,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptIdPBImpl.newInstance(appId,1);
ContainerId containerId=ContainerIdPBImpl.newInstance(appAttemptId,1);
when(attempt.getAssignedContainerID()).thenReturn(containerId);
when(attempt.getAssignedContainerMgrAddress()).thenReturn("assignedContainerMgrAddress");
when(attempt.getNodeRackName()).thenReturn("nodeRackName");
final long taStartTime=100002L;
final long taFinishTime=100012L;
final long taShuffleFinishTime=100010L;
final long taSortFinishTime=100011L;
final TaskAttemptState taState=TaskAttemptState.SUCCEEDED;
when(attempt.getLaunchTime()).thenReturn(taStartTime);
when(attempt.getFinishTime()).thenReturn(taFinishTime);
when(attempt.getShuffleFinishTime()).thenReturn(taShuffleFinishTime);
when(attempt.getSortFinishTime()).thenReturn(taSortFinishTime);
when(attempt.getState()).thenReturn(taState);
TaskAttemptReport taReport=mock(TaskAttemptReport.class);
when(taReport.getStartTime()).thenReturn(taStartTime);
when(taReport.getFinishTime()).thenReturn(taFinishTime);
when(taReport.getShuffleFinishTime()).thenReturn(taShuffleFinishTime);
when(taReport.getSortFinishTime()).thenReturn(taSortFinishTime);
when(taReport.getContainerId()).thenReturn(containerId);
when(taReport.getProgress()).thenReturn(1.0f);
when(taReport.getStateString()).thenReturn("Processed 128/128 records \n");
when(taReport.getTaskAttemptState()).thenReturn(taState);
when(taReport.getDiagnosticInfo()).thenReturn("");
when(attempt.getReport()).thenReturn(taReport);
attempts.put(taId,attempt);
when(task.getAttempts()).thenReturn(attempts);
app.setTask(task);
Job job=mock(Job.class);
when(job.getUserName()).thenReturn("User");
app.setJob(job);
AttemptsBlockForTest block=new AttemptsBlockForTest(app);
block.addParameter(AMParams.TASK_TYPE,"r");
PrintWriter pWriter=new PrintWriter(data);
Block html=new BlockForTest(new HtmlBlockForTest(),pWriter,0,false);
block.render(html);
pWriter.flush();
assertTrue(data.toString().contains("0 attempt_0_0001_r_000000_0"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertFalse(data.toString().contains("Processed 128/128 records
\n"));
assertTrue(data.toString().contains("Processed 128\\/128 records <p> \\n"));
assertTrue(data.toString().contains("_0005_01_000001:attempt_0_0001_r_000000_0:User:"));
assertTrue(data.toString().contains("100002"));
assertTrue(data.toString().contains("100010"));
assertTrue(data.toString().contains("100011"));
assertTrue(data.toString().contains("100012"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * For a killed job, the REST counters resource must return a jobCounters
 * object that carries only the job id (no counter groups).
 */
@Test public void testJobCountersForKilledJob() throws Exception {
WebResource r=resource();
// NOTE(review): MockHistoryContext(...,true) presumably produces a killed
// job -- confirm against the mock's constructor.
appContext=new MockHistoryContext(0,1,1,1,true);
injector=Guice.createInjector(new ServletModule(){
@Override protected void configureServlets(){
webApp=mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
}
);
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("jobCounters");
WebServicesTestUtils.checkStringMatch("id",MRApps.toString(id),info.getString("id"));
// Only the "id" field: a killed job reports no counter groups.
assertTrue("Job shouldn't contain any counters",info.length() == 1);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Filters jobs by [finishedTimeBegin, finishedTimeEnd] where the end bound
 * is the second-latest finish time: exactly one job (the latest finisher)
 * must be excluded from the result.
 */
@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
int size=jobsMap.size();
ArrayList finishTime=new ArrayList(size);
for ( Map.Entry entry : jobsMap.entrySet()) {
finishTime.add(entry.getValue().getReport().getFinishTime());
}
Collections.sort(finishTime);
assertTrue("Error we must have atleast 3 jobs",size >= 3);
// The second-largest finish time becomes the inclusive upper bound.
long midFinishTime=finishTime.get(size - 2);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("finishedTimeBegin",String.valueOf(40000)).queryParam("finishedTimeEnd",String.valueOf(midFinishTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject jobs=json.getJSONObject("jobs");
JSONArray arr=jobs.getJSONArray("job");
// All jobs except the latest finisher are within the window.
assertEquals("incorrect number of elements",size - 1,arr.length());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Filters jobs by [startedTimeBegin, startedTimeEnd] where the end bound
 * is the second-latest start time: exactly one job (the latest starter)
 * must be excluded from the result.
 */
@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
int size=jobsMap.size();
ArrayList startTime=new ArrayList(size);
for ( Map.Entry entry : jobsMap.entrySet()) {
startTime.add(entry.getValue().getReport().getStartTime());
}
Collections.sort(startTime);
assertTrue("Error we must have atleast 3 jobs",size >= 3);
// The second-largest start time becomes the inclusive upper bound.
long midStartTime=startTime.get(size - 2);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("startedTimeBegin",String.valueOf(40000)).queryParam("startedTimeEnd",String.valueOf(midStartTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject jobs=json.getJSONObject("jobs");
JSONArray arr=jobs.getJSONArray("job");
// All jobs except the latest starter are within the window.
assertEquals("incorrect number of elements",size - 1,arr.length());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Querying the history server for a JobState that no job is in must
 * return a JSON "jobs" element that is null.
 */
@Test public void testJobsQueryStateNone() throws JSONException, Exception {
WebResource r=resource();
// Start from all states and remove each one that is actually in use.
ArrayList JOB_STATES=new ArrayList(Arrays.asList(JobState.values()));
Map jobsMap=appContext.getAllJobs();
for ( Map.Entry entry : jobsMap.entrySet()) {
JOB_STATES.remove(entry.getValue().getState());
}
assertTrue("No unused job states",JOB_STATES.size() > 0);
JobState notInUse=JOB_STATES.get(0);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("state",notInUse.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
assertEquals("jobs is not null",JSONObject.NULL,json.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A user name containing the history-file delimiter must come out
 * percent-encoded in the generated done-file name.
 */
@Test public void testUserNamePercentEncoding() throws IOException {
  final JobIndexInfo indexInfo=new JobIndexInfo();
  final JobId yarnJobId=TypeConverter.toYarn(JobID.forName(JOB_ID));
  indexInfo.setJobId(yarnJobId);
  indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  // The field under test: a user name that embeds the delimiter.
  indexInfo.setUser(USER_NAME_WITH_DELIMITER);
  indexInfo.setJobName(JOB_NAME);
  indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
  indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
  indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
  indexInfo.setJobStatus(JOB_STATUS);
  indexInfo.setQueueName(QUEUE_NAME);
  indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
  final String doneFileName=FileNameIndexUtils.getDoneFileName(indexInfo);
  Assert.assertTrue("User name not encoded correctly into job history file",doneFileName.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A queue name containing the history-file delimiter must come out
 * percent-encoded in the generated done-file name.
 */
@Test public void testQueueNamePercentEncoding() throws IOException {
  final JobIndexInfo indexInfo=new JobIndexInfo();
  final JobId yarnJobId=TypeConverter.toYarn(JobID.forName(JOB_ID));
  indexInfo.setJobId(yarnJobId);
  indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  indexInfo.setUser(USER_NAME);
  indexInfo.setJobName(JOB_NAME);
  indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
  indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
  indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
  indexInfo.setJobStatus(JOB_STATUS);
  // The field under test: a queue name that embeds the delimiter.
  indexInfo.setQueueName(QUEUE_NAME_WITH_DELIMITER);
  indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
  final String doneFileName=FileNameIndexUtils.getDoneFileName(indexInfo);
  Assert.assertTrue("Queue name not encoded correctly into job history file",doneFileName.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A job name containing the history-file delimiter must come out
 * percent-encoded in the generated done-file name.
 */
@Test public void testJobNamePercentEncoding() throws IOException {
  final JobIndexInfo indexInfo=new JobIndexInfo();
  final JobId yarnJobId=TypeConverter.toYarn(JobID.forName(JOB_ID));
  indexInfo.setJobId(yarnJobId);
  indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  indexInfo.setUser(USER_NAME);
  // The field under test: a job name that embeds the delimiter.
  indexInfo.setJobName(JOB_NAME_WITH_DELIMITER);
  indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
  indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
  indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
  indexInfo.setJobStatus(JOB_STATUS);
  indexInfo.setQueueName(QUEUE_NAME);
  indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
  final String doneFileName=FileNameIndexUtils.getDoneFileName(indexInfo);
  Assert.assertTrue("Job name not encoded correctly into job history file",doneFileName.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
}
BooleanVerifier
/**
 * setupDistributedCache on a fresh, empty Configuration must register
 * no local resources at all.
 */
@Test(timeout=30000) public void testSetupDistributedCacheEmpty() throws IOException {
  final Configuration emptyConf=new Configuration();
  final Map resources=new HashMap();
  MRApps.setupDistributedCache(emptyConf,resources);
  assertTrue("Empty Config did not produce an empty list of resources",resources.isEmpty());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * With MAPREDUCE_JOB_USER_CLASSPATH_FIRST enabled, the job.jar entries
 * must appear at the very front of the generated CLASSPATH.
 */
@Test(timeout=120000) public void testSetClasspathWithUserPrecendence(){
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST,true);
Map env=new HashMap();
try {
MRApps.setClasspath(env,conf);
}
catch ( Exception e) {
fail("Got exception while setting classpath");
}
String env_str=env.get("CLASSPATH");
// Expected prefix: PWD followed by the job.jar entries, user-first.
String expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),"job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",env_str.startsWith(expectedClasspath));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With MAPREDUCE_JOB_CLASSLOADER enabled, job.jar entries must be kept
 * out of CLASSPATH and placed in APP_CLASSPATH instead (they are loaded
 * by the isolated job classloader, not the system one).
 */
@Test(timeout=120000) public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER,true);
Map env=new HashMap();
MRApps.setClasspath(env,conf);
String cp=env.get("CLASSPATH");
String appCp=env.get("APP_CLASSPATH");
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the" + " classpath!",cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",cp.contains("PWD"));
String expectedAppClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),"job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app" + " classpath!",expectedAppClasspath,appCp);
}
BooleanVerifier
/**
 * Sanity-checks ApplicationClassLoader's default system-class list:
 * the default XML resources and known system classes must match it,
 * and an arbitrary application class must not.
 */
@Test public void testSystemClasses(){
  final List defaultSystemClasses=Arrays.asList(StringUtils.getTrimmedStrings(ApplicationClassLoader.DEFAULT_SYSTEM_CLASSES));
  for ( String defaultXml : DEFAULT_XMLS) {
    assertTrue(defaultXml + " must be system resource",ApplicationClassLoader.isSystemClass(defaultXml,defaultSystemClasses));
  }
  for ( String klass : SYS_CLASSES) {
    assertTrue(klass + " must be system class",ApplicationClassLoader.isSystemClass(klass,defaultSystemClasses));
  }
  assertFalse("/fake/Klass must not be a system class",ApplicationClassLoader.isSystemClass("/fake/Klass",defaultSystemClasses));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * MRApps.getSystemPropertiesToLog: a blank config value yields null;
 * a comma-separated list yields a string containing exactly the listed
 * property names and nothing else.
 */
@Test public void testLogSystemProperties() throws Exception {
Configuration conf=new Configuration();
// A whitespace-only value means "log nothing".
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG," ");
String value=MRApps.getSystemPropertiesToLog(conf);
assertNull(value);
String classpath="java.class.path";
String os="os.name";
String version="java.version";
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,classpath + ", " + os);
value=MRApps.getSystemPropertiesToLog(conf);
assertNotNull(value);
assertTrue(value.contains(classpath));
assertTrue(value.contains(os));
// java.version was not requested, so it must not leak into the output.
assertFalse(value.contains(version));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * With MAPREDUCE_JOB_USER_CLASSPATH_FIRST disabled, the job.jar entries
 * must still be present in CLASSPATH but must NOT be its prefix.
 */
@Test(timeout=120000) public void testSetClasspathWithNoUserPrecendence(){
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST,false);
Map env=new HashMap();
try {
MRApps.setClasspath(env,conf);
}
catch ( Exception e) {
fail("Got exception while setting classpath");
}
String env_str=env.get("CLASSPATH");
String expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList("job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
// Present somewhere in the classpath...
assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in" + " the classpath!",env_str.contains(expectedClasspath));
// ...but not at the front, since user precedence is off.
assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",env_str.startsWith(expectedClasspath));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Default setClasspath behavior: CLASSPATH starts with PWD and contains
 * both the YARN application classpath and the MapReduce application
 * classpath (commas normalized to the platform separator).
 */
@Test(timeout=120000) public void testSetClasspath() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=Job.getInstance(conf);
Map environment=new HashMap();
MRApps.setClasspath(environment,job.getConfiguration());
assertTrue(environment.get("CLASSPATH").startsWith(ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String yarnAppClasspath=job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,StringUtils.join(",",YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (yarnAppClasspath != null) {
// Config stores comma-separated entries; CLASSPATH uses the separator.
yarnAppClasspath=yarnAppClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
String mrAppClasspath=job.getConfiguration().get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH);
if (mrAppClasspath != null) {
mrAppClasspath=mrAppClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(mrAppClasspath));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Framework-path handling in setClasspath:
 * 1) a framework path with no matching classpath entry must be rejected;
 * 2) with a matching entry, the framework precedes job.jar by default;
 * 3) with user precedence on, job.jar precedes the framework.
 */
@Test(timeout=3000000) public void testSetClasspathWithFramework() throws IOException {
final String FRAMEWORK_NAME="some-framework-name";
// "#name" gives the archive a symlink name the classpath must reference.
final String FRAMEWORK_PATH="some-framework-path#" + FRAMEWORK_NAME;
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH,FRAMEWORK_PATH);
Map env=new HashMap();
try {
MRApps.setClasspath(env,conf);
fail("Failed to catch framework path set without classpath change");
}
catch ( IllegalArgumentException e) {
assertTrue("Unexpected IllegalArgumentException",e.getMessage().contains("Could not locate MapReduce framework name '" + FRAMEWORK_NAME + "'"));
}
env.clear();
final String FRAMEWORK_CLASSPATH=FRAMEWORK_NAME + "/*.jar";
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,FRAMEWORK_CLASSPATH);
MRApps.setClasspath(env,conf);
final String stdClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList("job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
// Default ordering: PWD, framework, then the standard job entries.
String expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),FRAMEWORK_CLASSPATH,stdClasspath));
assertEquals("Incorrect classpath with framework and no user precedence",expectedClasspath,env.get("CLASSPATH"));
env.clear();
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST,true);
MRApps.setClasspath(env,conf);
// User precedence flips the job entries ahead of the framework.
expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),stdClasspath,FRAMEWORK_CLASSPATH));
assertEquals("Incorrect classpath with framework and user precedence",expectedClasspath,env.get("CLASSPATH"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * An archive registered via CLASSPATH_ARCHIVES/CACHE_ARCHIVES must show
 * up (by its "#testTGZ" link name) in the generated CLASSPATH, after the
 * PWD prefix and the configured YARN application classpath.
 */
@Test(timeout=120000) public void testSetClasspathWithArchives() throws IOException {
File testTGZ=new File(testWorkDir,"test.tgz");
// FIX: close the stream even if write() throws; the original leaked the
// FileOutputStream on any exception between open and close.
FileOutputStream out=new FileOutputStream(testTGZ);
try {
out.write(0);
}
finally {
out.close();
}
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=Job.getInstance(conf);
conf=job.getConfiguration();
String testTGZQualifiedPath=FileSystem.getLocal(conf).makeQualified(new Path(testTGZ.getAbsolutePath())).toString();
conf.set(MRJobConfig.CLASSPATH_ARCHIVES,testTGZQualifiedPath);
// "#testTGZ" is the symlink name the classpath entry should use.
conf.set(MRJobConfig.CACHE_ARCHIVES,testTGZQualifiedPath + "#testTGZ");
Map environment=new HashMap();
MRApps.setClasspath(environment,conf);
assertTrue(environment.get("CLASSPATH").startsWith(ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String confClasspath=job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,StringUtils.join(",",YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (confClasspath != null) {
confClasspath=confClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(confClasspath));
assertTrue(environment.get("CLASSPATH").contains("testTGZ"));
}
BooleanVerifier
/**
 * Maps internal TaskStates onto the UI buckets: SCHEDULED is PENDING,
 * every terminal state is COMPLETED, and RUNNING is RUNNING.
 */
@Test public void testTaskStateUI(){
  assertTrue(MRApps.TaskStateUI.PENDING.correspondsTo(TaskState.SCHEDULED));
  // All terminal states collapse into the COMPLETED bucket.
  for ( TaskState terminal : new TaskState[]{TaskState.SUCCEEDED,TaskState.FAILED,TaskState.KILLED}) {
    assertTrue(MRApps.TaskStateUI.COMPLETED.correspondsTo(terminal));
  }
  assertTrue(MRApps.TaskStateUI.RUNNING.correspondsTo(TaskState.RUNNING));
}
InternalCallVerifier BooleanVerifier
/**
 * GangliaContext.close() must close the datagram socket that init()
 * opened (regression check for a socket leak).
 */
@Test public void testCloseShouldCloseTheSocketWhichIsCreatedByInit() throws Exception {
AbstractMetricsContext context=new GangliaContext();
context.init("gangliaContext",ContextFactory.getFactory());
GangliaContext gangliaContext=(GangliaContext)context;
// init() must have opened the socket...
assertFalse("Socket already closed",gangliaContext.datagramSocket.isClosed());
context.close();
// ...and close() must have released it.
assertTrue("Socket not closed",gangliaContext.datagramSocket.isClosed());
}
BooleanVerifier
/**
 * Creating a MetricsConfig from a properties file that does not exist
 * must not throw; it simply yields an empty configuration.
 */
@Test public void testMissingFiles(){
  final MetricsConfig missing=MetricsConfig.create("JobTracker","non-existent.properties");
  assertTrue(missing.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A MetricsSourceAdapter must expose the same counter value through
 * both getMetrics() and the JMX attribute "C1", before and after an
 * increment.
 */
@Test public void testGetMetricsAndJmx() throws Exception {
TestSource source=new TestSource("test");
MetricsSourceBuilder sb=MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s=sb.build();
List injectedTags=new ArrayList();
MetricsSourceAdapter sa=new MetricsSourceAdapter("test","test","test desc",s,injectedTags,null,null,1,false);
MetricsCollectorImpl builder=new MetricsCollectorImpl();
Iterable metricsRecords=sa.getMetrics(builder,true);
MetricsRecordImpl metricsRecord=metricsRecords.iterator().next();
// Counter starts at zero via both access paths.
assertEquals(0L,metricsRecord.metrics().iterator().next().value().longValue());
// Sleep past the 1s JMX cache period so the attribute refreshes.
Thread.sleep(100);
assertEquals(0L,(Number)sa.getAttribute("C1"));
source.incrementCnt();
builder=new MetricsCollectorImpl();
metricsRecords=sa.getMetrics(builder,true);
metricsRecord=metricsRecords.iterator().next();
assertTrue(metricsRecord.metrics().iterator().hasNext());
Thread.sleep(100);
// The increment must be visible through JMX as well.
assertEquals(1L,(Number)sa.getAttribute("C1"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stress-tests MetricsSystemImpl#publishMetricsNow from many threads at
 * once: each thread sets its own gauge, publishes synchronously, and then
 * verifies the CollectingSink saw exactly its value. Fails if any
 * publish was dropped or any thread's value went missing.
 */
@Test public void testMultiThreadedPublish() throws Exception {
final int numThreads=10;
// Size the sink queue to the thread count so no publish is dropped.
new ConfigBuilder().add("*.period",80).add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,numThreads).save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
final CollectingSink sink=new CollectingSink(numThreads);
ms.registerSink("collector","Collector of values from all threads.",sink);
final TestSource[] sources=new TestSource[numThreads];
final Thread[] threads=new Thread[numThreads];
final String[] results=new String[numThreads];
final CyclicBarrier barrier1=new CyclicBarrier(numThreads), barrier2=new CyclicBarrier(numThreads);
for (int i=0; i < numThreads; i++) {
sources[i]=ms.register("threadSource" + i,"A source of my threaded goodness.",new TestSource("threadSourceRec" + i));
threads[i]=new Thread(new Runnable(){
// Waits on the given barrier; records a diagnostic and returns false
// if the rendezvous fails for any reason.
private boolean safeAwait( int mySource, CyclicBarrier barrier){
try {
// BUG FIX: await on the barrier that was passed in. The original
// always awaited barrier1, so the second rendezvous (barrier2)
// never actually synchronized the threads.
barrier.await(2,TimeUnit.SECONDS);
}
catch ( InterruptedException e) {
results[mySource]="Interrupted";
return false;
}
catch ( BrokenBarrierException e) {
results[mySource]="Broken Barrier";
return false;
}
catch ( TimeoutException e) {
results[mySource]="Timed out on barrier";
return false;
}
return true;
}
@Override public void run(){
// Each thread's name encodes its index into sources/results.
int mySource=Integer.parseInt(Thread.currentThread().getName());
if (sink.collected[mySource].get() != 0L) {
results[mySource]="Someone else collected my metric!";
return;
}
// First rendezvous: publish from all threads concurrently.
if (!safeAwait(mySource,barrier1)) return;
sources[mySource].g1.set(230);
ms.publishMetricsNow();
// Second rendezvous: wait until everyone has published before checking.
if (!safeAwait(mySource,barrier2)) return;
if (sink.collected[mySource].get() != 230L) {
results[mySource]="Metric not collected!";
return;
}
results[mySource]="Passed";
}
}
,"" + i);
}
for ( Thread t : threads) t.start();
for ( Thread t : threads) t.join();
assertEquals(0L,ms.droppedPubAll.value());
assertTrue(StringUtils.join("\n",Arrays.asList(results)),Iterables.all(Arrays.asList(results),new Predicate(){
@Override public boolean apply( @Nullable String input){
return input.equalsIgnoreCase("Passed");
}
}
));
ms.stop();
ms.shutdown();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A sink that hangs must cause the publish to be counted as dropped,
 * get interrupted on shutdown, and still be offered subsequent records
 * after its first hang.
 */
@Test public void testHangingSink(){
// retry.count=0 so the system gives up on the hanging sink immediately.
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.sink.hanging.retry.delay","1").add("test.sink.hanging.retry.backoff","1.01").add("test.sink.hanging.retry.count","0").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
TestSource s=ms.register("s3","s3 desc",new TestSource("s3rec"));
s.c1.incr();
HangingSink hanging=new HangingSink();
ms.registerSink("hanging","Hang the sink!",hanging);
ms.publishMetricsNow();
// The hung publish counts as dropped; the sink is not yet interrupted.
assertEquals(1L,ms.droppedPubAll.value());
assertFalse(hanging.getInterrupted());
ms.stop();
ms.shutdown();
// Shutdown must interrupt the hung sink thread.
assertTrue(hanging.getInterrupted());
assertTrue("The sink didn't get called after its first hang " + "for subsequent records.",hanging.getGotCalledSecondTime());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Concurrent consumer access to a SinkQueue is illegal: while one
 * consumer is sleeping inside consume(), every other mutating call
 * (clear/consume/consumeAll/dequeue) must throw
 * ConcurrentModificationException, and the queue contents must be
 * left intact.
 * @throws Exception
 */
@Test public void testConcurrentConsumers() throws Exception {
// Queue of capacity 2 whose consumer thread is asleep holding element 1.
final SinkQueue q=newSleepingConsumerQueue(2,1);
assertTrue("should enqueue",q.enqueue(2));
assertEquals("queue back",2,(int)q.back());
// Queue is full (capacity 2), so this enqueue is dropped.
assertTrue("should drop",!q.enqueue(3));
shouldThrowCME(new Fun(){
@Override public void run(){
q.clear();
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consume(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consumeAll(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.dequeue();
}
}
);
// None of the rejected calls may have altered the queue state.
assertEquals("queue size",2,q.size());
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Common SinkQueue use: enqueue/dequeue round trips, consume() of a
 * single element, and an empty queue reporting null front/back.
 * @throws Exception
 */
@Test public void testCommon() throws Exception {
final SinkQueue q=new SinkQueue(2);
q.enqueue(1);
// With a single element, front and back are the same.
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",1,(int)q.back());
assertEquals("element",1,(int)q.dequeue());
assertTrue("should enqueue",q.enqueue(2));
q.consume(new Consumer(){
@Override public void consume( Integer e){
assertEquals("element",2,(int)e);
}
}
);
assertTrue("should enqueue",q.enqueue(3));
assertEquals("element",3,(int)q.dequeue());
// Drained queue: size 0, front/back both null.
assertEquals("queue size",0,q.size());
assertEquals("queue front",null,q.front());
assertEquals("queue back",null,q.back());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Nonblocking enqueue on a full SinkQueue: the offer is dropped
 * (returns false) rather than blocking, and the original contents
 * survive untouched.
 * @throws Exception
 */
@Test public void testFull() throws Exception {
final SinkQueue q=new SinkQueue(1);
q.enqueue(1);
// Capacity 1 and already holding an element: the enqueue is dropped.
assertTrue("should drop",!q.enqueue(2));
assertEquals("element",1,(int)q.dequeue());
q.enqueue(3);
q.consume(new Consumer(){
@Override public void consume( Integer e){
assertEquals("element",3,(int)e);
}
}
);
assertEquals("queue size",0,q.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A slow (sleeping) consumer must not corrupt the queue: while it holds
 * elements 1 and 2, a further enqueue is dropped and front/back/size
 * stay consistent.
 * @throws Exception
 */
@Test public void testHangingConsumer() throws Exception {
// Capacity 2, consumer asleep while elements 1 and 2 are queued.
SinkQueue q=newSleepingConsumerQueue(2,1,2);
assertEquals("queue back",2,(int)q.back());
// Full queue: the nonblocking enqueue must be dropped.
assertTrue("should drop",!q.enqueue(3));
assertEquals("queue size",2,q.size());
assertEquals("queue head",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * consumeAll must deliver every queued element exactly once, in FIFO
 * order; a mocked Runnable counts the callbacks.
 * @throws Exception
 */
@Test public void testConsumeAll() throws Exception {
final int capacity=64;
final SinkQueue q=new SinkQueue(capacity);
for (int i=0; i < capacity; ++i) {
assertTrue("should enqueue",q.enqueue(i));
}
// Queue is now full; one more enqueue must be rejected.
assertTrue("should not enqueue",!q.enqueue(capacity));
final Runnable trigger=mock(Runnable.class);
q.consumeAll(new Consumer(){
private int expected=0;
@Override public void consume( Integer e){
// Elements must arrive in the order they were enqueued.
assertEquals("element",expected++,(int)e);
trigger.run();
}
}
);
// Exactly one callback per queued element.
verify(trigger,times(capacity)).run();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * MetricsRegistry factory methods: each newCounter/newGauge/newStat call
 * registers a metric of the right mutable type, and re-registering an
 * existing name throws a MetricsException.
 */
@Test public void testNewMetrics(){
final MetricsRegistry r=new MetricsRegistry("test");
r.newCounter("c1","c1 desc",1);
r.newCounter("c2","c2 desc",2L);
r.newGauge("g1","g1 desc",3);
r.newGauge("g2","g2 desc",4L);
r.newStat("s1","s1 desc","ops","time");
assertEquals("num metrics in registry",5,r.metrics().size());
// int vs long initial values select the Int vs Long mutable types.
assertTrue("c1 found",r.get("c1") instanceof MutableCounterInt);
assertTrue("c2 found",r.get("c2") instanceof MutableCounterLong);
assertTrue("g1 found",r.get("g1") instanceof MutableGaugeInt);
assertTrue("g2 found",r.get("g2") instanceof MutableGaugeLong);
assertTrue("s1 found",r.get("s1") instanceof MutableStat);
// Duplicate registration of "c1" must be rejected.
expectMetricsException("Metric name c1 already exists",new Runnable(){
@Override public void run(){
r.newCounter("c1","test dup",0);
}
}
);
}
APIUtilityVerifier BooleanVerifier
/**
 * End-to-end FileSink check: publish two metrics through a sink bound to
 * context "test1" and verify the output file's records match the
 * expected format (tag/metric order within a record is not fixed, so the
 * pattern accepts either order).
 */
@Test(timeout=6000) public void testFileSink() throws IOException {
outFile=getTestTempFile("test-file-sink-",".out");
final String outPath=outFile.getAbsolutePath();
// Only context "test1" is routed to the file sink; MyMetrics2 is not.
new ConfigBuilder().add("*.period",10000).add("test.sink.mysink0.class",FileSink.class.getName()).add("test.sink.mysink0.filename",outPath).add("test.sink.mysink0.context","test1").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("test");
ms.start();
final MyMetrics1 mm1=new MyMetrics1().registerWith(ms);
new MyMetrics2().registerWith(ms);
mm1.testMetric1.incr();
mm1.testMetric2.incr(2);
// Synchronous publish, then shut down so the file is flushed and closed.
ms.publishMetricsNow();
ms.stop();
ms.shutdown();
InputStream is=null;
ByteArrayOutputStream baos=null;
String outFileContent=null;
try {
is=new FileInputStream(outFile);
baos=new ByteArrayOutputStream((int)outFile.length());
IOUtils.copyBytes(is,baos,1024,true);
outFileContent=new String(baos.toByteArray(),"UTF-8");
}
finally {
IOUtils.cleanup(null,baos,is);
}
Pattern expectedContentPattern=Pattern.compile("^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" + "(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," + "\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)"+ "$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1,"+ "\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*",Pattern.MULTILINE);
assertTrue(expectedContentPattern.matcher(outFileContent).matches());
}
TestCleaner BranchVerifier BooleanVerifier HybridVerifier
/** Cleans up the sink output file created by the test, if any. */
@After public void after(){
  if (outFile == null) {
    return;
  }
  outFile.delete();
  assertFalse(outFile.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * A metrics tag with a null value must be accepted by the cache and
 * come back as null (i.e. the tag's hashCode handling tolerates null).
 */
@Test public void testNullTag(){
  final MetricsCache metricsCache=new MetricsCache();
  final MetricsRecord record=makeRecord("r",Arrays.asList(makeTag("t",null)),Arrays.asList(makeMetric("m",0),makeMetric("m1",1)));
  final MetricsCache.Record cached=metricsCache.update(record);
  assertTrue("t value should be null",null == cached.getTag("t"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Correctness test that checks that absolute error of the estimate is
 * within specified error bounds for some randomly permuted streams of
 * items. Runs 10 shuffles of 1..100000 through the estimator and checks
 * each configured quantile against its error bound.
 */
@Test public void testQuantileError() throws IOException {
final int count=100000;
// Fixed seed keeps the shuffles (and thus the test) deterministic.
Random r=new Random(0xDEADDEAD);
Long[] values=new Long[count];
for (int i=0; i < count; i++) {
values[i]=(long)(i + 1);
}
for (int i=0; i < 10; i++) {
System.out.println("Starting run " + i);
Collections.shuffle(Arrays.asList(values),r);
estimator.clear();
for (int j=0; j < count; j++) {
estimator.insert(values[j]);
}
Map snapshot;
snapshot=estimator.snapshot();
for ( Quantile q : quantiles) {
// With 1..count inserted, rank and value coincide, so the expected
// value of quantile q is simply q.quantile * count.
long actual=(long)(q.quantile * count);
long error=(long)(q.error * count);
long estimate=snapshot.get(q);
System.out.println(String.format("Expected %d with error %d, estimated %d",actual,error,estimate));
assertTrue(estimate <= actual + error);
assertTrue(estimate >= actual - error);
}
}
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that repeated calls to getting the local host are fairly fast,
 * and hence that caching is being used.
 * @throws Exception if hostname lookups fail
 */
@Test public void testGetLocalHostIsFast() throws Exception {
// First call may be slow (real lookup); it is deliberately untimed.
String hostname1=DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname1);
String hostname2=DNS.getDefaultHost(DEFAULT);
// Only the third call is timed: by now the result must be cached.
long t1=Time.now();
String hostname3=DNS.getDefaultHost(DEFAULT);
long t2=Time.now();
assertEquals(hostname3,hostname2);
assertEquals(hostname2,hostname1);
long interval=t2 - t1;
// Generous 20s bound: an uncached reverse lookup could exceed this.
assertTrue("Took too long to determine local host - caching is not working",interval < 20000);
}
APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier
/**
 * Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}:
 * every address bound to a local interface is local; a well-known
 * external address (8.8.8.8) is not.
 */
@Test public void testIsLocalAddress() throws Exception {
assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
// Every address of every local interface must be reported local.
Enumeration interfaces=NetworkInterface.getNetworkInterfaces();
if (interfaces != null) {
while (interfaces.hasMoreElements()) {
NetworkInterface i=interfaces.nextElement();
Enumeration addrs=i.getInetAddresses();
if (addrs == null) {
continue;
}
while (addrs.hasMoreElements()) {
InetAddress addr=addrs.nextElement();
assertTrue(NetUtils.isLocalAddress(addr));
}
}
}
assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that we can't accidentally connect back to the connecting socket
 * due to a quirk in the TCP spec (simultaneous connect to self).
 * This is a regression test for HADOOP-6722. NetUtils.connect must
 * detect the loopback and raise ConnectException; some platforms
 * instead reject with SocketException "Invalid argument".
 */
@Test public void testAvoidLoopbackTcpSockets() throws Exception {
Configuration conf=new Configuration();
Socket socket=NetUtils.getDefaultSocketFactory(conf).createSocket();
socket.bind(new InetSocketAddress("127.0.0.1",0));
System.err.println("local address: " + socket.getLocalAddress());
System.err.println("local port: " + socket.getLocalPort());
try {
// Connecting to our own bound address/port must be refused.
NetUtils.connect(socket,new InetSocketAddress(socket.getLocalAddress(),socket.getLocalPort()),20000);
socket.close();
fail("Should not have connected");
}
catch ( ConnectException ce) {
System.err.println("Got exception: " + ce);
assertTrue(ce.getMessage().contains("resulted in a loopback"));
}
catch ( SocketException se) {
// Some platforms report the self-connect as EINVAL instead.
assertTrue(se.getMessage().contains("Invalid argument"));
}
finally {
// FIX: the socket leaked on every exception path; Socket.close()
// is idempotent, so closing again after the success path is safe.
socket.close();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link NetUtils#normalizeHostNames}: IPs pass through
 * unchanged, resolvable names are replaced by their IPs, and
 * unresolvable names are left as-is.
 * NOTE(review): relies on live DNS for "1.kanyezone.appspot.com" and on
 * "localhost" resolving to 127.0.0.1 — environment-sensitive.
 */
@Test public void testNormalizeHostName(){
List hosts=Arrays.asList(new String[]{"127.0.0.1","localhost","1.kanyezone.appspot.com","UnknownHost123"});
List normalizedHosts=NetUtils.normalizeHostNames(hosts);
// An IP literal is already normalized.
assertEquals(normalizedHosts.get(0),hosts.get(0));
// "localhost" resolves, so it is replaced by 127.0.0.1.
assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
assertEquals(normalizedHosts.get(1),hosts.get(0));
assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
// An unresolvable name is passed through untouched.
assertEquals(normalizedHosts.get(3),hosts.get(3));
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Every registered datanode must be reported by contains(); a node that
 * was never added must not.
 */
@Test public void testContains() throws Exception {
  final DatanodeDescriptor outsider=DFSTestUtil.getDatanodeDescriptor("8.8.8.8","/d2/r4");
  for ( DatanodeDescriptor node : dataNodes) {
    assertTrue(cluster.contains(node));
  }
  assertFalse(cluster.contains(outsider));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Removing every datanode must leave an empty topology (no leaves,
 * contains() false for all); the nodes are re-added afterwards so later
 * tests see the original cluster.
 */
@Test public void testRemove() throws Exception {
  for ( DatanodeDescriptor node : dataNodes) {
    cluster.remove(node);
  }
  for ( DatanodeDescriptor node : dataNodes) {
    assertFalse(cluster.contains(node));
  }
  assertEquals(0,cluster.getNumOfLeaves());
  // Restore the topology for subsequent tests.
  for ( DatanodeDescriptor node : dataNodes) {
    cluster.add(node);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A datanode whose rack mapping makes the topology invalid must be
 * rejected by the NameNode, and the bad mapping must not be cached:
 * after fixing the StaticMapping and restarting the datanode, it must
 * be able to rejoin with the corrected network location.
 */
@Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// "/a/b" (2 levels) vs "/c" (1 level) is an invalid mix: one of the
// two datanodes will be rejected.
String racks[]={"/a/b","/c"};
String hosts[]={"foo1.example.com","foo2.example.com"};
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn=cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
DatanodeInfo[] info;
// Wait until exactly one datanode is live; both live would mean the
// invalid topology was wrongly accepted.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx=validIdx == 1 ? 0 : 1;
// Remap the rejected host onto the surviving (valid) rack depth.
StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation());
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
// Now both datanodes should eventually register as live.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
}
else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")");
}
Thread.sleep(1000);
}
// Both nodes ended up on the same (valid) rack: no stale cached entry.
Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Adding a node at a different topology depth (a rack and a non-rack
 * node at the same level) must raise InvalidTopologyException with a
 * descriptive message.
 */
@Test public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster=new NetworkTopology();
// First two nodes are at depth 2 (/d1/r1); the third is at depth 1 (/d1).
DatanodeDescriptor invalDataNodes[]=new DatanodeDescriptor[]{DFSTestUtil.getDatanodeDescriptor("1.1.1.1","/d1/r1"),DFSTestUtil.getDatanodeDescriptor("2.2.2.2","/d1/r1"),DFSTestUtil.getDatanodeDescriptor("3.3.3.3","/d1")};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);
try {
invalCluster.add(invalDataNodes[2]);
fail("expected InvalidTopologyException");
}
catch ( NetworkTopology.InvalidTopologyException e) {
assertTrue(e.getMessage().startsWith("Failed to add "));
assertTrue(e.getMessage().contains("You cannot have a rack and a non-rack node at the same " + "level of the network topology."));
}
}
APIUtilityVerifier BooleanVerifier
/**
 * This test checks that chooseRandom works for an excluded node:
 * over 100 picks with scope "~node0", every node except the excluded
 * one must be chosen at least once.
 */
@Test public void testChooseRandomExcludedNode(){
// "~" prefix excludes the given path from the selection scope.
String scope="~" + NodeBase.getPath(dataNodes[0]);
Map frequency=pickNodesAtRandom(100,scope);
for ( Node key : dataNodes) {
// Each non-excluded node should be picked at least once in 100 tries.
assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test checks that chooseRandom works for an excluded rack:
 * with scope "~/d2", nodes under /d2 must never be picked and every
 * other node must be picked at least once in 100 tries.
 */
@Test public void testChooseRandomExcludedRack(){
Map frequency=pickNodesAtRandom(100,"~" + "/d2");
for (int j=0; j < dataNodes.length; j++) {
int freq=frequency.get(dataNodes[j]);
if (dataNodes[j].getNetworkLocation().startsWith("/d2")) {
// Excluded rack: must never be selected.
assertEquals(0,freq);
}
else {
assertTrue(freq > 0);
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the rack count of the fixture topology and the expected
 * same-rack relationship of each adjacent datanode pair.
 */
@Test public void testRacks() throws Exception {
  assertEquals(cluster.getNumOfRacks(),6);
  // Expected isOnSameRack result for each adjacent pair (i, i+1).
  final boolean[] sameRack={true,false,true,true,false,true};
  for (int i=0; i < sameRack.length; i++) {
    assertEquals(sameRack[i],cluster.isOnSameRack(dataNodes[i],dataNodes[i + 1]));
  }
}
BooleanVerifier
@Test public void testSortByDistance() throws Exception {
// Case 1: the reader (dataNodes[0]) is in the array; it must sort to front.
DatanodeDescriptor[] testNodes=new DatanodeDescriptor[3];
testNodes[0]=dataNodes[1];
testNodes[1]=dataNodes[2];
testNodes[2]=dataNodes[0];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
// Case 2: only the first (length - 2) entries participate in the sort; the
// assertions confirm the trailing two entries are left untouched.
DatanodeDescriptor[] dtestNodes=new DatanodeDescriptor[5];
dtestNodes[0]=dataNodes[8];
dtestNodes[1]=dataNodes[12];
dtestNodes[2]=dataNodes[11];
dtestNodes[3]=dataNodes[9];
dtestNodes[4]=dataNodes[10];
cluster.sortByDistance(dataNodes[8],dtestNodes,dtestNodes.length - 2,0xDEADBEEF,false);
assertTrue(dtestNodes[0] == dataNodes[8]);
assertTrue(dtestNodes[1] == dataNodes[11]);
assertTrue(dtestNodes[2] == dataNodes[12]);
assertTrue(dtestNodes[3] == dataNodes[9]);
assertTrue(dtestNodes[4] == dataNodes[10]);
// Case 3: reader in the array with a different node mix.
testNodes[0]=dataNodes[1];
testNodes[1]=dataNodes[3];
testNodes[2]=dataNodes[0];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[3]);
// Case 4: reader not in the array; expected order pinned by assertions.
testNodes[0]=dataNodes[5];
testNodes[1]=dataNodes[3];
testNodes[2]=dataNodes[1];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
// Case 5: same node set, different starting order, same seed -- the outcome
// must be deterministic for a given (reader, seed) pair.
testNodes[0]=dataNodes[1];
testNodes[1]=dataNodes[5];
testNodes[2]=dataNodes[3];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
// Case 6: a different seed (0xDEAD) yields a different expected order.
testNodes[0]=dataNodes[1];
testNodes[1]=dataNodes[5];
testNodes[2]=dataNodes[3];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEAD,false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[5]);
assertTrue(testNodes[2] == dataNodes[3]);
// Case 7: different readers/seeds should eventually produce a different
// first location for the same node set.
DatanodeDescriptor first=null;
boolean foundRandom=false;
for (int i=5; i <= 7; i++) {
testNodes[0]=dataNodes[5];
testNodes[1]=dataNodes[6];
testNodes[2]=dataNodes[7];
cluster.sortByDistance(dataNodes[i],testNodes,testNodes.length,0xBEADED + i,false);
if (first == null) {
first=testNodes[0];
}
else {
if (first != testNodes[0]) {
foundRandom=true;
break;
}
}
}
assertTrue("Expected to find a different first location",foundRandom);
// Case 8: same check with randomizeBlockLocationsPerBlock=true.
first=null;
// NOTE(review): foundRandom is not reset to false here, so when the loop
// above already set it, the assertion below passes regardless of what this
// loop does -- confirm whether that is intentional.
for (int i=1; i <= 4; i++) {
testNodes[0]=dataNodes[13];
testNodes[1]=dataNodes[14];
testNodes[2]=dataNodes[15];
cluster.sortByDistance(dataNodes[15 + i],testNodes,testNodes.length,0xBEADED,true);
if (first == null) {
first=testNodes[0];
}
else {
if (first != testNodes[0]) {
foundRandom=true;
break;
}
}
}
assertTrue("Expected to find a different first location",foundRandom);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testNodeGroups() throws Exception {
  // The fixture topology defines 3 racks.
  assertEquals(3,cluster.getNumOfRacks());
  // Whether each adjacent pair of data nodes shares a node group.
  final boolean[] sameGroup={true,false,false,true,false,false,false};
  for (int i=0; i < sameGroup.length; i++) {
    boolean onSameGroup=cluster.isOnSameNodeGroup(dataNodes[i],dataNodes[i + 1]);
    if (sameGroup[i]) {
      assertTrue(onSameGroup);
    } else {
      assertFalse(onSameGroup);
    }
  }
}
BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * This test checks that adding a node with invalid topology will be failed
 * with an exception to show topology is invalid.
 */
@Test public void testAddNodeWithInvalidTopology(){
  // Capture whatever add() throws, then inspect it after the try block.
  Exception caught=null;
  try {
    cluster.add(rackOnlyNode);
  }
  catch ( Exception e) {
    caught=e;
  }
  if (caught == null) {
    fail("Exception should be thrown, so we should not have reached here.");
  }
  if (!(caught instanceof IllegalArgumentException)) {
    fail("Expecting IllegalArgumentException, but caught:" + caught);
  }
  assertTrue(caught.getMessage().contains("illegal network location"));
}
BooleanVerifier
@Test public void testSortByDistance() throws Exception {
NodeBase[] testNodes=new NodeBase[4];
// Reader dataNodes[0] is in the array: it must be sorted to the front.
testNodes[0]=dataNodes[1];
testNodes[1]=dataNodes[2];
testNodes[2]=dataNodes[3];
testNodes[3]=dataNodes[0];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
assertTrue(testNodes[3] == dataNodes[3]);
// Different starting order; only the first two positions are asserted.
testNodes[0]=dataNodes[3];
testNodes[1]=dataNodes[4];
testNodes[2]=dataNodes[1];
testNodes[3]=dataNodes[0];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
// Yet another mix; again only the leading positions are pinned.
testNodes[0]=dataNodes[5];
testNodes[1]=dataNodes[3];
testNodes[2]=dataNodes[2];
testNodes[3]=dataNodes[0];
cluster.sortByDistance(dataNodes[0],testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[2]);
// Sorting relative to computeNode (a reader defined outside this method);
// dataNodes[0] is still expected first -- presumably closest to
// computeNode in the fixture topology (TODO confirm against the setup).
testNodes[0]=dataNodes[6];
testNodes[1]=dataNodes[7];
testNodes[2]=dataNodes[2];
testNodes[3]=dataNodes[0];
cluster.sortByDistance(computeNode,testNodes,testNodes.length,0xDEADBEEF,false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[2]);
}
APIUtilityVerifier BooleanVerifier
/**
 * This test checks that chooseRandom works for an excluded node.
 */
@Test public void testChooseRandomExcludedNode(){
  // The previous Javadoc described replica placement with an invalid last
  // node, which does not match this body; rewritten to match the code.
  // "~" + path excludes dataNodes[0] from the random pick scope.
  String scope="~" + NodeBase.getPath(dataNodes[0]);
  // Restore the stripped generics: raw Map made `get(key) > 0` uncompilable.
  Map<Node, Integer> frequency=pickNodesAtRandom(100,scope);
  for ( Node key : dataNodes) {
    // Every non-excluded node must be picked at least once in 100 tries.
    assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRacks() throws Exception {
  // The fixture topology defines 3 racks.
  assertEquals(3,cluster.getNumOfRacks());
  // Whether each adjacent pair of data nodes shares a rack.
  final boolean[] sameRack={true,true,false,true,true,false,true};
  for (int i=0; i < sameRack.length; i++) {
    boolean onSameRack=cluster.isOnSameRack(dataNodes[i],dataNodes[i + 1]);
    if (sameRack[i]) {
      assertTrue(onSameRack);
    } else {
      assertFalse(onSameRack);
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  // A configuration that names a topology script implies multiple switches.
  Configuration scriptConf=new Configuration();
  scriptConf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping switchMapping=createMapping(scriptConf);
  assertFalse("Expected to be multi switch",switchMapping.isSingleSwitch());
  // Replacing the config with one lacking a script reverts to single-switch.
  switchMapping.setConf(new Configuration());
  assertTrue("Expected to be single switch",switchMapping.isSingleSwitch());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  // No script file configured: the mapping must report single-switch both
  // directly and through the static helper.
  Configuration emptyConf=new Configuration();
  ScriptBasedMapping switchMapping=createMapping(emptyConf);
  assertTrue("Expected to be single switch",switchMapping.isSingleSwitch());
  assertTrue("Expected to be single switch",AbstractDNSToSwitchMapping.isMappingSingleSwitch(switchMapping));
}
BooleanVerifier
@Test public void testNullConfig() throws Throwable {
  // Even with a null configuration the mapping must act as single-switch.
  ScriptBasedMapping nullConfMapping=createMapping(null);
  assertTrue("Expected to be single switch",nullConfMapping.isSingleSwitch());
}
BooleanVerifier
@Test public void testNullConfig() throws Throwable {
  // A mapping built from a null configuration defaults to single-switch.
  ScriptBasedMapping unconfigured=createMapping(null);
  assertTrue("Expected to be single switch",unconfigured.isSingleSwitch());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  // Without a script filename the mapping is single-switch; check both the
  // instance method and the static helper agree.
  Configuration bareConf=new Configuration();
  ScriptBasedMapping bareMapping=createMapping(bareConf);
  assertTrue("Expected to be single switch",bareMapping.isSingleSwitch());
  assertTrue("Expected to be single switch",AbstractDNSToSwitchMapping.isMappingSingleSwitch(bareMapping));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  // Setting a script filename makes the mapping multi-switch...
  Configuration withScript=new Configuration();
  withScript.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping mappingUnderTest=createMapping(withScript);
  assertFalse("Expected to be multi switch",mappingUnderTest.isSingleSwitch());
  // ...and clearing the script (fresh config) makes it single-switch again.
  mappingUnderTest.setConf(new Configuration());
  assertTrue("Expected to be single switch",mappingUnderTest.isSingleSwitch());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
@Test public void testSocketIOWithTimeout() throws Exception {
// Exercises SocketInputStream/SocketOutputStream timeout and interrupt
// behaviour over an in-process NIO pipe.
Pipe pipe=Pipe.open();
Pipe.SourceChannel source=pipe.source();
Pipe.SinkChannel sink=pipe.sink();
try {
final InputStream in=new SocketInputStream(source,TIMEOUT);
OutputStream out=new SocketOutputStream(sink,TIMEOUT);
byte[] writeBytes=TEST_STRING.getBytes();
byte[] readBytes=new byte[writeBytes.length];
// A byte with the high bit set checks that read() returns the unsigned
// value (0x80) rather than being confused with EOF (-1).
byte byteWithHighBit=(byte)0x80;
out.write(writeBytes);
out.write(byteWithHighBit);
// doIO with a null input stream drives writes until the output times out.
doIO(null,out,TIMEOUT);
in.read(readBytes);
assertTrue(Arrays.equals(writeBytes,readBytes));
assertEquals(byteWithHighBit & 0xff,in.read());
doIO(in,null,TIMEOUT);
// Double the read timeout, then disable it entirely (0 = block forever).
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
doIO(in,null,TIMEOUT * 2);
((SocketInputStream)in).setTimeout(0);
// With no timeout, a blocked read must be interruptible and surface as
// an InterruptedIOException inside the worker thread.
TestingThread thread=new TestingThread(ctx){
@Override public void doWork() throws Exception {
try {
in.read();
fail("Did not fail with interrupt");
}
catch ( InterruptedIOException ste) {
LOG.info("Got expection while reading as expected : " + ste.getMessage());
}
}
}
;
ctx.addThread(thread);
ctx.startThreads();
Thread.sleep(1000);
thread.interrupt();
ctx.stop();
// Interrupting the reader must not close the underlying channels.
assertTrue(source.isOpen());
assertTrue(sink.isOpen());
// NOTE(review): the guard suggests the "stream is closed" behaviour after
// an interrupt does not hold on Windows/PPC_64 -- confirm before relying
// on it elsewhere.
if (!Shell.WINDOWS && !Shell.PPC_64) {
try {
out.write(1);
fail("Did not throw");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("stream is closed",ioe);
}
}
// Closing a stream closes its channel; the peer then reads EOF.
out.close();
assertFalse(sink.isOpen());
assertEquals(-1,in.read());
in.close();
assertFalse(source.isOpen());
}
finally {
if (source != null) {
source.close();
}
if (sink != null) {
sink.close();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verify the cached mapper delegates the switch mapping query to the inner
 * mapping, which again handles arbitrary DNSToSwitchMapping implementations
 * @throws Throwable on any problem
 */
@Test public void testCachingRelaysStringOperations() throws Throwable {
  Configuration scriptConf=new Configuration();
  String scriptname="mappingscript.sh";
  scriptConf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,scriptname);
  // The inner mapping's toString() should surface the configured script name.
  ScriptBasedMapping innerMapping=new ScriptBasedMapping(scriptConf);
  assertTrue("Did not find " + scriptname + " in "+ innerMapping,innerMapping.toString().contains(scriptname));
  // Wrapping it in a cache must relay toString() to the inner mapping.
  CachedDNSToSwitchMapping cachedMapping=new CachedDNSToSwitchMapping(innerMapping);
  assertTrue("Did not find " + scriptname + " in "+ cachedMapping,cachedMapping.toString().contains(scriptname));
}
BooleanVerifier
/**
 * Verify the switch mapping query handles arbitrary DNSToSwitchMapping
 * implementations
 * @throws Throwable on any problem
 */
@Test public void testStandaloneClassesAssumedMultiswitch() throws Throwable {
  // A mapping that is not an AbstractDNSToSwitchMapping subclass is assumed
  // to span multiple switches.
  DNSToSwitchMapping standalone=new StandaloneSwitchMapping();
  assertFalse("Expected to be multi switch " + standalone,AbstractDNSToSwitchMapping.isMappingSingleSwitch(standalone));
}
InternalCallVerifier BooleanVerifier
/**
 * Verify the cached mapper delegates the switch mapping query to the inner
 * mapping, which again handles arbitrary DNSToSwitchMapping implementations
 * @throws Throwable on any problem
 */
@Test public void testCachingRelaysStringOperationsToNullScript() throws Throwable {
  // No script configured: toString() should report the NO_SCRIPT marker.
  Configuration emptyConf=new Configuration();
  ScriptBasedMapping innerMapping=new ScriptBasedMapping(emptyConf);
  assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT + " in "+ innerMapping,innerMapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
  // The caching wrapper must relay the same marker in its toString().
  CachedDNSToSwitchMapping cachedMapping=new CachedDNSToSwitchMapping(innerMapping);
  assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT + " in "+ cachedMapping,cachedMapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
}
BooleanVerifier
/**
 * Verify the cached mapper delegates the switch mapping query to the inner
 * mapping, which again handles arbitrary DNSToSwitchMapping implementations
 * @throws Throwable on any problem
 */
@Test public void testCachingRelays() throws Throwable {
  // Wrap a standalone (multi-switch) mapping; the cache must relay the query.
  CachedDNSToSwitchMapping cachedMapping=new CachedDNSToSwitchMapping(new StandaloneSwitchMapping());
  assertFalse("Expected to be multi switch " + cachedMapping,cachedMapping.isSingleSwitch());
}
BooleanVerifier
@Test public void testNullMapping(){
  // A null mapping can never be considered single-switch.
  final boolean singleSwitch=AbstractDNSToSwitchMapping.isMappingSingleSwitch(null);
  assertFalse(singleSwitch);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test setting some server options.
 * @throws IOException
 */
@Test(timeout=180000) public void testServerOptions() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath();
DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
try {
// Halve the receive buffer size and verify the attribute round-trips.
int bufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
int newBufSize=bufSize / 2;
serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,newBufSize);
int nextBufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
Assert.assertEquals(newBufSize,nextBufSize);
// Set a 1000ms receive timeout and verify it round-trips too.
int newTimeout=1000;
serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT,newTimeout);
int nextTimeout=serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
Assert.assertEquals(newTimeout,nextTimeout);
// With the timeout in force, accept() with no incoming connection must
// fail with a SocketTimeoutException from the native accept(2) call.
try {
serv.accept();
Assert.fail("expected the accept() to time out and fail");
}
catch ( SocketTimeoutException e) {
GenericTestUtils.assertExceptionContains("accept(2) error: ",e);
}
}
finally {
serv.close();
Assert.assertFalse(serv.isOpen());
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
/**
 * Test file descriptor passing.
 * @throws IOException
 */
@Test(timeout=180000) public void testFdPassing() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock").getAbsolutePath();
final byte clientMsg1[]=new byte[]{0x11,0x22,0x33,0x44,0x55,0x66};
final byte serverMsg1[]=new byte[]{0x31,0x30,0x32,0x34,0x31,0x33,0x44,0x1,0x1,0x1,0x1,0x1};
// Collects one result per worker thread: a Throwable on failure or a
// Success marker.
// NOTE(review): raw ArrayBlockingQueue -- the type parameter appears to
// have been stripped by formatting; confirm and restore it.
final ArrayBlockingQueue threadResults=new ArrayBlockingQueue(2);
final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
final PassedFile passedFiles[]=new PassedFile[]{new PassedFile(1),new PassedFile(2)};
final FileDescriptor passedFds[]=new FileDescriptor[passedFiles.length];
for (int i=0; i < passedFiles.length; i++) {
passedFds[i]=passedFiles[i].getInputStream().getFD();
}
// Server side: read the client's message, then send the fds + serverMsg1.
Thread serverThread=new Thread(){
public void run(){
DomainSocket conn=null;
try {
conn=serv.accept();
byte in1[]=new byte[clientMsg1.length];
InputStream connInputStream=conn.getInputStream();
IOUtils.readFully(connInputStream,in1,0,in1.length);
Assert.assertTrue(Arrays.equals(clientMsg1,in1));
DomainSocket domainConn=(DomainSocket)conn;
domainConn.sendFileDescriptors(passedFds,serverMsg1,0,serverMsg1.length);
conn.close();
}
catch ( Throwable e) {
threadResults.add(e);
Assert.fail(e.getMessage());
}
threadResults.add(new Success());
}
}
;
serverThread.start();
// Client side: send clientMsg1, then receive the message + descriptors.
Thread clientThread=new Thread(){
public void run(){
try {
DomainSocket client=DomainSocket.connect(TEST_PATH);
OutputStream clientOutputStream=client.getOutputStream();
InputStream clientInputStream=client.getInputStream();
clientOutputStream.write(clientMsg1);
DomainSocket domainConn=(DomainSocket)client;
byte in1[]=new byte[serverMsg1.length];
FileInputStream recvFis[]=new FileInputStream[passedFds.length];
// Read at most length-1 bytes here; the remainder is read below via the
// plain input stream.
int r=domainConn.recvFileInputStreams(recvFis,in1,0,in1.length - 1);
Assert.assertTrue(r > 0);
IOUtils.readFully(clientInputStream,in1,r,in1.length - r);
Assert.assertTrue(Arrays.equals(serverMsg1,in1));
// Each received stream must correspond to a file the server passed.
for (int i=0; i < passedFds.length; i++) {
Assert.assertNotNull(recvFis[i]);
passedFiles[i].checkInputStream(recvFis[i]);
}
for ( FileInputStream fis : recvFis) {
fis.close();
}
client.close();
}
catch ( Throwable e) {
threadResults.add(e);
}
threadResults.add(new Success());
}
}
;
clientThread.start();
// Surface the first worker failure (with its stack trace) in the test thread.
for (int i=0; i < 2; i++) {
Throwable t=threadResults.take();
if (!(t instanceof Success)) {
Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
}
}
serverThread.join(120000);
clientThread.join(120000);
serv.close();
for ( PassedFile pf : passedFiles) {
pf.cleanup();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=180000) public void testShutdown() throws Exception {
// Verifies that shutdown() unblocks a reader with EOF rather than an error,
// after all previously written bytes have been delivered.
final AtomicInteger bytesRead=new AtomicInteger(0);
final AtomicBoolean failed=new AtomicBoolean(false);
final DomainSocket[] socks=DomainSocket.socketpair();
// Reader: consume bytes until EOF (-1); record any IOException as failure.
Runnable reader=new Runnable(){
@Override public void run(){
while (true) {
try {
int ret=socks[1].getInputStream().read();
if (ret == -1) return;
bytesRead.addAndGet(1);
}
catch ( IOException e) {
DomainSocket.LOG.error("reader error",e);
failed.set(true);
return;
}
}
}
}
;
Thread readerThread=new Thread(reader);
readerThread.start();
socks[0].getOutputStream().write(1);
socks[0].getOutputStream().write(2);
socks[0].getOutputStream().write(3);
Assert.assertTrue(readerThread.isAlive());
// shutdown() should deliver EOF to the (possibly blocked) reader.
socks[0].shutdown();
readerThread.join();
// The reader must have seen all 3 bytes and no IOException.
Assert.assertFalse(failed.get());
Assert.assertEquals(3,bytesRead.get());
IOUtils.cleanup(null,socks);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testExactAddressRW(){
  // Export list grants read-write to exactly address1.
  NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,address1 + " rw");
  AccessPrivilege matchedPrivilege=matcher.getAccessPrivilege(address1,hostname1);
  Assert.assertEquals(AccessPrivilege.READ_WRITE,matchedPrivilege);
  // A different address must not be granted read-write access.
  AccessPrivilege otherPrivilege=matcher.getAccessPrivilege(address2,hostname1);
  Assert.assertFalse(AccessPrivilege.READ_WRITE == otherPrivilege);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testIdOutOfIntegerRange() throws IOException {
  // uids/gids above Integer.MAX_VALUE must wrap into the signed int range
  // (e.g. 4294967294 -> -2), as the assertions below expect.
  String GET_ALL_USERS_CMD="echo \"" + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n" + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"+ "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"+ "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"+ "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD="echo \"" + "hdfs:*:11501:hrt_hdfs\n" + "rpcuser:*:29:\n"+ "nfsnobody:*:4294967294:\n"+ "nfsnobody1:*:4294967295:\n"+ "maxint:*:2147483647:\n"+ "minint:*:2147483648:\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3";
  // Restore the stripped generics: both maps key numeric ids to names.
  BiMap<Integer, String> uMap=HashBiMap.create();
  BiMap<Integer, String> gMap=HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  // assertEquals reports both values on failure, unlike assertTrue(size == 7).
  assertEquals(7,uMap.size());
  assertEquals("nfsnobody",uMap.get(-2));
  assertEquals("nfsnobody1",uMap.get(-1));
  assertEquals("maxint",uMap.get(2147483647));
  assertEquals("minint",uMap.get(-2147483648));
  assertEquals("archivebackup",uMap.get(1031));
  assertEquals("hdfs",uMap.get(11501));
  assertEquals("daemon",uMap.get(2));
  IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  assertEquals(7,gMap.size());
  assertEquals("hdfs",gMap.get(11501));
  assertEquals("rpcuser",gMap.get(29));
  assertEquals("nfsnobody",gMap.get(-2));
  assertEquals("nfsnobody1",gMap.get(-1));
  assertEquals("maxint",gMap.get(2147483647));
  assertEquals("minint",gMap.get(-2147483648));
  assertEquals("mapred3",gMap.get(498));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDuplicates() throws IOException {
  // Duplicate names/ids in the shell output: the first occurrence of each
  // name and each id should win, as the assertions below pin down.
  String GET_ALL_USERS_CMD="echo \"root:x:0:0:root:/root:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n" + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "bin:x:2:2:bin:/bin:/bin/sh\n"+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD="echo \"hdfs:*:11501:hrt_hdfs\n" + "mapred:x:497\n" + "mapred2:x:497\n"+ "mapred:x:498\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3";
  // Restore the stripped generics: both maps key numeric ids to names.
  BiMap<Integer, String> uMap=HashBiMap.create();
  BiMap<Integer, String> gMap=HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  assertEquals(5,uMap.size());
  assertEquals("root",uMap.get(0));
  assertEquals("hdfs",uMap.get(11501));
  assertEquals("hdfs2",uMap.get(11502));
  assertEquals("bin",uMap.get(2));
  assertEquals("daemon",uMap.get(1));
  IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  // assertEquals reports both values on failure, unlike assertTrue(size == 3).
  assertEquals(3,gMap.size());
  assertEquals("hdfs",gMap.get(11501));
  assertEquals("mapred",gMap.get(497));
  assertEquals("mapred3",gMap.get(498));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMultipleFrames(){
  RpcFrameDecoder decoder=new RpcFrameDecoder();
  // First fragment: last-fragment bit clear, declared payload length 10.
  byte[] fragment1=new byte[4 + 10];
  fragment1[0]=0;
  fragment1[1]=0;
  fragment1[2]=0;
  fragment1[3]=(byte)10;
  assertFalse(XDR.isLastFragment(fragment1));
  // assertEquals gives a useful failure message, unlike assertTrue(x == 10).
  assertEquals(10,XDR.fragmentSize(fragment1));
  ByteBuffer buffer=ByteBuffer.allocate(4 + 10);
  buffer.put(fragment1);
  buffer.flip();
  ChannelBuffer buf=new ByteBufferBackedChannelBuffer(buffer);
  ChannelBuffer channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  // Not the last fragment yet, so the decoder buffers it and emits nothing.
  assertTrue(channelBuffer == null);
  // Second fragment: last-fragment bit set (high bit of the first marker byte).
  byte[] fragment2=new byte[4 + 10];
  fragment2[0]=(byte)(1 << 7);
  fragment2[1]=0;
  fragment2[2]=0;
  fragment2[3]=(byte)10;
  assertTrue(XDR.isLastFragment(fragment2));
  assertEquals(10,XDR.fragmentSize(fragment2));
  buffer=ByteBuffer.allocate(4 + 10);
  buffer.put(fragment2);
  buffer.flip();
  buf=new ByteBufferBackedChannelBuffer(buffer);
  channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  // Both fragments complete the record: 2 x 10 payload bytes are readable.
  assertTrue(channelBuffer != null);
  assertEquals(20,channelBuffer.readableBytes());
}
InternalCallVerifier BooleanVerifier
@Test public void testSingleFrame(){
  RpcFrameDecoder frameDecoder=new RpcFrameDecoder();
  // One byte is too short to even hold the 4-byte record marker: no output.
  ByteBuffer byteBuffer=ByteBuffer.allocate(1);
  ChannelBuffer inputBuf=new ByteBufferBackedChannelBuffer(byteBuffer);
  ChannelBuffer decoded=(ChannelBuffer)frameDecoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),inputBuf);
  assertTrue(decoded == null);
  // A last fragment claiming 10 payload bytes but carrying only 9.
  byte[] shortFragment=new byte[4 + 9];
  shortFragment[0]=(byte)(1 << 7);
  shortFragment[1]=0;
  shortFragment[2]=0;
  shortFragment[3]=(byte)10;
  assertTrue(XDR.isLastFragment(shortFragment));
  assertTrue(XDR.fragmentSize(shortFragment) == 10);
  byteBuffer=ByteBuffer.allocate(4 + 9);
  byteBuffer.put(shortFragment);
  byteBuffer.flip();
  inputBuf=new ByteBufferBackedChannelBuffer(byteBuffer);
  decoded=(ChannelBuffer)frameDecoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),inputBuf);
  // Payload incomplete, so the decoder still produces nothing.
  assertTrue(decoded == null);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testCacheEntry(){
  // A fresh entry starts in the in-progress state with no response attached.
  CacheEntry entry=new CacheEntry();
  validateInprogressCacheEntry(entry);
  assertTrue(entry.isInProgress());
  assertFalse(entry.isCompleted());
  assertNull(entry.getResponse());
  // Attaching a response transitions the entry to the completed state.
  RpcResponse mockResponse=mock(RpcResponse.class);
  entry.setResponse(mockResponse);
  validateCompletedCacheEntry(entry,mockResponse);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testCacheFunctionality() throws UnknownHostException {
// Cache capped at 10 entries; 20 distinct client addresses are inserted.
RpcCallCache cache=new RpcCallCache("Test",10);
int size=0;
for (int clientId=0; clientId < 20; clientId++) {
InetAddress clientIp=InetAddress.getByName("1.1.1." + clientId);
System.out.println("Adding " + clientIp);
cache.checkOrAddToCache(clientIp,0);
// Tracked size grows by one per insert until it saturates at 10.
size=Math.min(++size,10);
System.out.println("Cache size " + cache.size());
assertEquals(size,cache.size());
// Oldest entries are evicted first: surviving ids start at this offset.
int startEntry=Math.max(clientId - 10 + 1,0);
// NOTE(review): the generic parameters appear stripped by formatting here;
// presumably Iterator<Entry<ClientRequest, CacheEntry>> -- confirm against
// the original source.
Iterator> iterator=cache.iterator();
for (int i=0; i < size; i++) {
ClientRequest key=iterator.next().getKey();
System.out.println("Entry " + key.getClientId());
assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),key.getClientId());
}
// Re-adding each surviving client returns its existing in-progress entry.
for (int i=0; i < size; i++) {
CacheEntry e=cache.checkOrAddToCache(InetAddress.getByName("1.1.1." + (startEntry + i)),0);
assertNotNull(e);
assertTrue(e.isInProgress());
assertFalse(e.isCompleted());
}
}
}
BooleanVerifier
@Test(timeout=1000) public void testRegistration() throws IOException, InterruptedException {
  // Build a PMAPPROC_SET request registering program 90000 v1 on tcp:1234.
  XDR req=new XDR();
  RpcCall.getInstance(++xid,RpcProgramPortmap.PROGRAM,RpcProgramPortmap.VERSION,RpcProgramPortmap.PMAPPROC_SET,new CredentialsNone(),new VerifierNone()).write(req);
  PortmapMapping sent=new PortmapMapping(90000,1,PortmapMapping.TRANSPORT_TCP,1234);
  sent.serialize(req);
  byte[] reqBuf=req.getBytes();
  DatagramSocket s=new DatagramSocket();
  DatagramPacket p=new DatagramPacket(reqBuf,reqBuf.length,pm.getUdpServerLoAddress());
  try {
    s.send(p);
  }
  finally {
    s.close();
  }
  // Give the server a moment to process the datagram.
  Thread.sleep(100);
  boolean found=false;
  // Restore the stripped generics: raw Map made the enhanced-for over
  // PortmapMapping values uncompilable.
  @SuppressWarnings("unchecked") Map<String, PortmapMapping> map=(Map<String, PortmapMapping>)Whitebox.getInternalState(pm.getHandler(),"map");
  for ( PortmapMapping m : map.values()) {
    if (m.getPort() == sent.getPort() && PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
      found=true;
      break;
    }
  }
  Assert.assertTrue("Registration failed",found);
}
APIUtilityVerifier BooleanVerifier
@Test(timeout=1000) public void testIdle() throws InterruptedException, IOException {
  Socket clientSocket=new Socket();
  try {
    clientSocket.connect(pm.getTcpServerLocalAddress());
    // Poll until the socket reports connected or we exhaust the retries.
    int attempts=0;
    while (!clientSocket.isConnected() && attempts < RETRY_TIMES) {
      ++attempts;
      Thread.sleep(SHORT_TIMEOUT_MILLISECONDS);
    }
    Assert.assertTrue("Failed to connect to the server",clientSocket.isConnected() && attempts < RETRY_TIMES);
    // The idle server should close the connection: read() must return EOF.
    int firstByte=clientSocket.getInputStream().read();
    Assert.assertTrue("The server failed to disconnect",firstByte == -1);
  }
  finally {
    clientSocket.close();
  }
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMapping() throws Exception {
  // Each user's first resolved group must match the expected fixture group.
  Groups compositeGroups=new Groups(conf);
  assertTrue(compositeGroups.getGroups(john.name).get(0).equals(john.group));
  assertTrue(compositeGroups.getGroups(hdfs.name).get(0).equals(hdfs.group));
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMappingWithoutCombined() throws Exception {
  // With provider combining disabled, only one provider's result is returned.
  conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY,"false");
  Groups compositeGroups=new Groups(conf);
  assertTrue(compositeGroups.getGroups(jack.name).size() == 1);
  assertTrue(compositeGroups.getGroups(jack.name).get(0).equals(jack.group));
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMappingWithCombined() throws Exception {
  // With provider combining enabled, results from both providers are merged.
  conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY,"true");
  Groups compositeGroups=new Groups(conf);
  assertTrue(compositeGroups.getGroups(jack.name).size() == 2);
  assertTrue(compositeGroups.getGroups(jack.name).contains(jack.group));
  assertTrue(compositeGroups.getGroups(jack.name).contains(jack.group2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("unchecked") @Test public void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
Credentials ts=new Credentials();
Token token1=new Token();
Token token2=new Token();
Text service1=new Text("service1");
Text service2=new Text("service2");
Collection services=new ArrayList();
services.add(service1);
services.add(service2);
token1.setService(service1);
token2.setService(service2);
ts.addToken(new Text("sometoken1"),token1);
ts.addToken(new Text("sometoken2"),token2);
final KeyGenerator kg=KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
String alias="alias";
Map m=new HashMap(10);
for (int i=0; i < 10; i++) {
Key key=kg.generateKey();
m.put(new Text(alias + i),key.getEncoded());
ts.addSecretKey(new Text(alias + i),key.getEncoded());
}
File tmpFileName=new File(tmpDir,"tokenStorageTest");
DataOutputStream dos=new DataOutputStream(new FileOutputStream(tmpFileName));
ts.write(dos);
dos.close();
DataInputStream dis=new DataInputStream(new FileInputStream(tmpFileName));
ts=new Credentials();
ts.readFields(dis);
dis.close();
Collection> list=ts.getAllTokens();
assertEquals("getAllTokens should return collection of size 2",list.size(),2);
boolean foundFirst=false;
boolean foundSecond=false;
for ( Token extends TokenIdentifier> token : list) {
if (token.getService().equals(service1)) {
foundFirst=true;
}
if (token.getService().equals(service2)) {
foundSecond=true;
}
}
assertTrue("Tokens for services service1 and service2 must be present",foundFirst && foundSecond);
int mapLen=m.size();
assertEquals("wrong number of keys in the Storage",mapLen,ts.numberOfSecretKeys());
for ( Text a : m.keySet()) {
byte[] kTS=ts.getSecretKey(a);
byte[] kLocal=m.get(a);
assertTrue("keys don't match for " + a,WritableComparator.compareBytes(kTS,0,kTS.length,kLocal,0,kLocal.length) == 0);
}
tmpFileName.delete();
}
InternalCallVerifier BooleanVerifier
@Test public void testGroupShell() throws Exception {
  Logger.getRootLogger().setLevel(Level.DEBUG);
  // Resolve the current user's groups via the shell-based mapping.
  Configuration shellConf=new Configuration();
  shellConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
  Groups groupService=new Groups(shellConf);
  String currentUser=System.getProperty("user.name");
  List resolvedGroups=groupService.getGroups(currentUser);
  LOG.info(currentUser + " has GROUPS: " + resolvedGroups.toString());
  // The current user must belong to at least one group.
  assertTrue(resolvedGroups.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testNetgroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " + "test the normal path and 'mvn -DTestGroupFallback clear test' will" + " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  // JNI netgroup mapping with shell fallback when the native lib is absent.
  Configuration fallbackConf=new Configuration();
  fallbackConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
  Groups groupService=new Groups(fallbackConf);
  String currentUser=System.getProperty("user.name");
  List resolvedGroups=groupService.getGroups(currentUser);
  LOG.info(currentUser + " has GROUPS: " + resolvedGroups.toString());
  // The current user must belong to at least one group.
  assertTrue(resolvedGroups.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testNetgroupShell() throws Exception {
  Logger.getRootLogger().setLevel(Level.DEBUG);
  // Resolve groups through the shell-based netgroup mapping.
  Configuration netgroupConf=new Configuration();
  netgroupConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
  Groups groupService=new Groups(netgroupConf);
  String currentUser=System.getProperty("user.name");
  List resolvedGroups=groupService.getGroups(currentUser);
  LOG.info(currentUser + " has GROUPS: " + resolvedGroups.toString());
  // The current user must belong to at least one group.
  assertTrue(resolvedGroups.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testGroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " + "test the normal path and 'mvn -DTestGroupFallback clear test' will" + " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  // JNI group mapping with shell fallback when the native lib is absent.
  Configuration fallbackConf=new Configuration();
  fallbackConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
  Groups groupService=new Groups(fallbackConf);
  String currentUser=System.getProperty("user.name");
  List resolvedGroups=groupService.getGroups(currentUser);
  LOG.info(currentUser + " has GROUPS: " + resolvedGroups.toString());
  // The current user must belong to at least one group.
  assertTrue(resolvedGroups.size() > 0);
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies the user-to-groups cache: a cached entry is served even after
 * the backing mapping is blacklisted, a never-cached blacklisted user
 * fails with "No groups found", and lookups recover once the blacklist
 * is cleared.
 */
@Test public void testGroupsCaching() throws Exception {
  // Disable negative caching so failed lookups surface immediately.
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,0);
  Groups groups=new Groups(conf);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  FakeGroupMapping.addToBlackList("user1");
  // First lookup for "me" populates the cache.
  assertEquals("lookup for me should return two groups",2,groups.getGroups("me").size());
  FakeGroupMapping.addToBlackList("me");
  // Cached entry still served even though "me" is now blacklisted.
  assertEquals("cached lookup for me should still return two groups",2,groups.getGroups("me").size());
  try {
    // "user1" was blacklisted before it was ever cached, so this must throw.
    LOG.error("We are not supposed to get here." + groups.getGroups("user1").toString());
    fail();
  }
  catch ( IOException ioe) {
    if (!ioe.getMessage().startsWith("No groups found")) {
      LOG.error("Got unexpected exception: " + ioe.getMessage());
      fail();
    }
  }
  FakeGroupMapping.clearBlackList();
  // After unblacklisting, the lookup succeeds and is cached normally.
  assertEquals("lookup for user1 should return two groups",2,groups.getGroups("user1").size());
}
InternalCallVerifier BooleanVerifier
/**
 * Static user-to-groups overrides must bypass the configured mapping
 * service entirely: "me" maps to no groups, "user1" to group1, "user2"
 * to group1,group2 — and the mapping class is never invoked for any of
 * them (tracked via FakeunPrivilegedGroupMapping.invoked).
 */
@Test public void testGroupLookupForStaticUsers() throws Exception {
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,FakeunPrivilegedGroupMapping.class,ShellBasedUnixGroupsMapping.class);
  conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,"me=;user1=group1;user2=group1,group2");
  Groups groups=new Groups(conf);
  // Restored generic types (they appear stripped in this file).
  List<String> userGroups=groups.getGroups("me");
  assertTrue("non-empty groups for static user",userGroups.isEmpty());
  assertFalse("group lookup done for static user",FakeunPrivilegedGroupMapping.invoked);
  List<String> expected=new ArrayList<String>();
  expected.add("group1");
  FakeunPrivilegedGroupMapping.invoked=false;
  userGroups=groups.getGroups("user1");
  assertTrue("groups not correct",expected.equals(userGroups));
  assertFalse("group lookup done for unprivileged user",FakeunPrivilegedGroupMapping.invoked);
  expected.add("group2");
  FakeunPrivilegedGroupMapping.invoked=false;
  userGroups=groups.getGroups("user2");
  assertTrue("groups not correct",expected.equals(userGroups));
  assertFalse("group lookup done for unprivileged user",FakeunPrivilegedGroupMapping.invoked);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end HDFS permission test on a 3-datanode mini cluster: verifies
 * FileNotFoundException for chown/chmod on missing paths, the observed
 * create modes, setPermission round-trips, and enforcement against a
 * non-superuser (mkdirs/create/open/rename).
 */
@Test public void testFilePermission() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
try {
FileSystem nnfs=FileSystem.get(conf);
// Operations on a nonexistent path must fail with FileNotFoundException.
assertFalse(nnfs.exists(CHILD_FILE1));
try {
nnfs.setOwner(CHILD_FILE1,"foo","bar");
assertTrue(false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
try {
nnfs.setPermission(CHILD_FILE1,new FsPermission((short)0777));
assertTrue(false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
// Requested 0777, but the resulting mode is rwxr-xr-x (see assertion) —
// presumably the fs umask is applied on create; confirm against config.
FSDataOutputStream out=nnfs.create(CHILD_FILE1,new FsPermission((short)0777),true,1024,(short)1,1024,null);
FileStatus status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.delete(CHILD_FILE1,false);
nnfs.mkdirs(CHILD_DIR1);
// Create without an explicit mode: observed default is rw-r--r--.
out=nnfs.create(CHILD_FILE1);
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rw-r--r--"));
byte data[]=new byte[FILE_LEN];
RAN.nextBytes(data);
out.write(data);
out.close();
// setPermission round-trip: "700" reads back as rwx------.
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwx------"));
// Read the data back through nnfs and verify content byte-for-byte.
byte dataIn[]=new byte[FILE_LEN];
FSDataInputStream fin=nnfs.open(CHILD_FILE1);
int bytesRead=fin.read(dataIn);
assertTrue(bytesRead == FILE_LEN);
for (int i=0; i < FILE_LEN; i++) {
assertEquals(data[i],dataIn[i]);
}
nnfs.setPermission(CHILD_FILE1,new FsPermission("755"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.setPermission(CHILD_FILE1,new FsPermission("744"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr--r--"));
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
// Switch to an unprivileged test user and verify access is denied where
// the mode bits do not allow it.
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
// NOTE(review): CHILD_DIR1 already exists at this point, so this mkdirs
// is presumably a no-op for the unprivileged user — confirm.
userfs.mkdirs(CHILD_DIR1);
assertTrue(!canMkdirs(userfs,CHILD_DIR2));
assertTrue(!canCreate(userfs,CHILD_FILE2));
assertTrue(!canOpen(userfs,CHILD_FILE1));
// Open up the tree so the unprivileged user can create and rename.
nnfs.setPermission(ROOT_PATH,new FsPermission((short)0755));
nnfs.setPermission(CHILD_DIR1,new FsPermission("777"));
nnfs.setPermission(new Path("/"),new FsPermission((short)0777));
final Path RENAME_PATH=new Path("/foo/bar");
userfs.mkdirs(RENAME_PATH);
assertTrue(canRename(userfs,RENAME_PATH,CHILD_DIR1));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies user-to-groups caching: repeated lookups return the same
 * groups, the -refreshUserToGroupsMappings admin command invalidates the
 * cache (subsequent lookups differ), and the cache also expires on its
 * own after groupRefreshTimeoutSec.
 */
@Test public void testGroupMappingRefresh() throws Exception {
DFSAdmin admin=new DFSAdmin(config);
String[] args=new String[]{"-refreshUserToGroupsMappings"};
Groups groups=Groups.getUserToGroupsMappingService(config);
String user=UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
List g1=groups.getGroups(user);
String[] str_groups=new String[g1.size()];
g1.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
System.out.println("second attempt, should be same:");
List g2=groups.getGroups(user);
// NOTE(review): str_groups is sized for g1 and reused for every later
// list; if a later list were larger, toArray would allocate a fresh
// array and this one would print stale contents. Harmless here because
// only the element-wise assertions below decide pass/fail.
g2.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i=0; i < g2.size(); i++) {
assertEquals("Should be same group ",g1.get(i),g2.get(i));
}
// Issue the refresh command; the next lookup must repopulate the cache.
admin.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List g3=groups.getGroups(user);
g3.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i=0; i < g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
}
// Sleep slightly past the cache timeout (x1100 ms, i.e. timeout + 10%)
// so cached entries expire naturally.
Thread.sleep(groupRefreshTimeoutSec * 1100);
System.out.println("fourth attempt(after timeout), should be different:");
List g4=groups.getGroups(user);
g4.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i=0; i < g4.size(); i++) {
assertFalse("Should be different group ",g3.get(i).equals(g4.get(i)));
}
}
BooleanVerifier
/**
 * Checks SecurityUtil.isTGSPrincipal against well-formed TGS principals
 * (krbtgt/REALM@REALM) and a set of malformed or mismatched names.
 */
@Test public void isOriginalTGTReturnsCorrectValues(){
  // Principals of the form krbtgt/<realm>@<same realm> are TGS principals.
  String[] tgsNames = {"krbtgt/foo@foo", "krbtgt/foo.bar.bat@foo.bar.bat"};
  for (String name : tgsNames) {
    assertTrue(SecurityUtil.isTGSPrincipal(new KerberosPrincipal(name)));
  }
  // null is never a TGS principal.
  assertFalse(SecurityUtil.isTGSPrincipal(null));
  // Wrong shape, missing realm, or instance/realm mismatch must be rejected.
  String[] nonTgsNames = {"blah", "", "krbtgt/hello", "/@", "krbtgt/foo@FOO"};
  for (String name : nonTgsNames) {
    assertFalse(SecurityUtil.isTGSPrincipal(new KerberosPrincipal(name)));
  }
}
BooleanVerifier
/**
 * SecurityUtil.login with an empty keytab file name must fail with an
 * IOException when Kerberos authentication is configured.
 */
@Test public void testStartsWithIncorrectSettings() throws IOException {
  Configuration configuration = new Configuration();
  SecurityUtil.setAuthenticationMethod(KERBEROS, configuration);
  String keyTabKey = "key";
  // Deliberately leave the keytab path empty.
  configuration.set(keyTabKey, "");
  UserGroupInformation.setConfiguration(configuration);
  boolean caught = false;
  try {
    SecurityUtil.login(configuration, keyTabKey, "", "");
  } catch (IOException e) {
    caught = true;
  }
  assertTrue("Exception for empty keytabfile name was expected", caught);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * given user name - get all the groups.
 * Needs to happen before creating the test users
 */
@Test(timeout=30000) public void testGetServerSideGroups() throws IOException, InterruptedException {
// Ask the OS who we are via `whoami`.
// NOTE(review): the BufferedReaders are never closed and readLine() could
// return null if the process produced no output — acceptable for a test,
// but worth confirming.
Process pp=Runtime.getRuntime().exec("whoami");
BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String userName=br.readLine().trim();
if (Shell.WINDOWS) {
// Strip a DOMAIN\ prefix and lower-case to match UGI normalization.
int sp=userName.lastIndexOf('\\');
if (sp != -1) {
userName=userName.substring(sp + 1);
}
userName=userName.toLowerCase();
}
// Collect the OS-reported group list (winutils on Windows, id -Gn on Unix).
pp=Runtime.getRuntime().exec(Shell.WINDOWS ? Shell.WINUTILS + " groups -F" : "id -Gn");
br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String line=br.readLine();
System.out.println(userName + ":" + line);
// LinkedHashSet preserves order while de-duplicating group names.
Set groups=new LinkedHashSet();
String[] tokens=line.split(Shell.TOKEN_SEPARATOR_REGEX);
for ( String s : tokens) {
groups.add(s);
}
// UGI's view of the current user must agree with the OS.
final UserGroupInformation login=UserGroupInformation.getCurrentUser();
String loginUserName=login.getShortUserName();
if (Shell.WINDOWS) {
loginUserName=loginUserName.toLowerCase();
}
assertEquals(userName,loginUserName);
// UGI's group list must match the OS group list element-for-element.
String[] gi=login.getGroupNames();
assertEquals(groups.size(),gi.length);
for (int i=0; i < gi.length; i++) {
assertTrue(groups.contains(gi[i]));
}
// A remote (fake) user has no server-side groups and is distinct from
// the login user inside doAs.
final UserGroupInformation fakeUser=UserGroupInformation.createRemoteUser("foo.bar");
fakeUser.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
UserGroupInformation current=UserGroupInformation.getCurrentUser();
assertFalse(current.equals(login));
assertEquals(current,fakeUser);
assertEquals(0,current.getGroupNames().length);
return null;
}
}
);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a test user must initialize default kerberos name rules, and
 * explicitly-set rules must survive subsequent user creation.
 */
@Test(timeout=30000) public void testEnsureInitWithRules() throws IOException {
  final String rules = "RULE:[1:RULE1]";
  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  // Creating any user triggers UGI initialization, which sets default rules.
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertTrue(KerberosName.hasRulesBeenSet());
  UserGroupInformation.reset();
  KerberosName.setRules(rules);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules, KerberosName.getRules());
  // Explicitly-set rules must not be clobbered by re-initialization.
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules, KerberosName.getRules());
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Auth-to-local rules follow the most recent setter: an explicit setRules
 * survives lazy UGI initialization, each setConfiguration call re-applies
 * the configured rules, and createUserForTesting never disturbs them.
 */
@Test(timeout=30000) public void testSetConfigWithRules(){
  final String[] rules = {"RULE:[1:TEST1]", "RULE:[1:TEST2]", "RULE:[1:TEST3]"};
  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  KerberosName.setRules(rules[0]);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules[0], KerberosName.getRules());
  // Lazy UGI initialization must not overwrite explicitly-set rules.
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[0], KerberosName.getRules());
  // Each setConfiguration applies the rules currently in the config.
  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[1]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[1], KerberosName.getRules());
  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[2]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[2], KerberosName.getRules());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[2], KerberosName.getRules());
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testUGITokens() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"});
Token t1=mock(Token.class);
when(t1.getService()).thenReturn(new Text("t1"));
Token t2=mock(Token.class);
when(t2.getService()).thenReturn(new Text("t2"));
Credentials creds=new Credentials();
byte[] secretKey=new byte[]{};
Text secretName=new Text("shhh");
creds.addSecretKey(secretName,secretKey);
ugi.addToken(t1);
ugi.addToken(t2);
ugi.addCredentials(creds);
Collection> z=ugi.getTokens();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2,z.size());
Credentials ugiCreds=ugi.getCredentials();
assertSame(secretKey,ugiCreds.getSecretKey(secretName));
assertEquals(1,ugiCreds.numberOfSecretKeys());
try {
z.remove(t1);
fail("Shouldn't be able to modify token collection from UGI");
}
catch ( UnsupportedOperationException uoe) {
}
Collection> otherSet=ugi.doAs(new PrivilegedExceptionAction>>(){
@Override public Collection> run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokens();
}
}
);
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
}
InternalCallVerifier BooleanVerifier
/**
* Test hasSufficientTimeElapsed method
*/
@Test(timeout=30000) public void testHasSufficientTimeElapsed() throws Exception {
Method method=UserGroupInformation.class.getDeclaredMethod("hasSufficientTimeElapsed",long.class);
method.setAccessible(true);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
User user=ugi.getSubject().getPrincipals(User.class).iterator().next();
long now=System.currentTimeMillis();
user.setLastLogin(now - 2 * 60 * 1000);
assertTrue((Boolean)method.invoke(ugi,now));
user.setLastLogin(now - 30 * 1000);
assertFalse((Boolean)method.invoke(ugi,now));
Configuration conf2=new Configuration(conf);
conf2.setLong(CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN,10 * 60);
UserGroupInformation.setConfiguration(conf2);
user.setLastLogin(now - 15 * 60 * 1000);
assertTrue((Boolean)method.invoke(ugi,now));
user.setLastLogin(now - 6 * 60 * 1000);
assertFalse((Boolean)method.invoke(ugi,now));
UserGroupInformation.setConfiguration(conf);
method.setAccessible(false);
}
InternalCallVerifier BooleanVerifier
/**
 * Logging out and back in through the stored LoginContext must keep the
 * very same User principal instance in the subject (identity, not just
 * equality).
 */
@Test(timeout=30000) public void testLoginModuleCommit() throws Exception {
  UserGroupInformation loginUgi=UserGroupInformation.getLoginUser();
  User user1=loginUgi.getSubject().getPrincipals(User.class).iterator().next();
  LoginContext login=user1.getLogin();
  login.logout();
  login.login();
  User user2=loginUgi.getSubject().getPrincipals(User.class).iterator().next();
  // assertSame expresses the identity check directly and reports both
  // objects on failure, unlike assertTrue(user1 == user2).
  Assert.assertSame(user1,user2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test login method
 *
 * Basic login sanity: the current user equals the login user, the user
 * belongs to at least one group, group metrics are recorded, and an
 * identity assumed via doAs differs from the login user.
 */
@Test(timeout=30000) public void testLogin() throws Exception {
  conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,String.valueOf(PERCENTILES_INTERVAL));
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
  assertEquals(UserGroupInformation.getCurrentUser(),UserGroupInformation.getLoginUser());
  assertTrue(ugi.getGroupNames().length >= 1);
  verifyGroupMetrics(1);
  UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
  // Restored generic type parameter (it appears stripped in this file).
  UserGroupInformation curUGI=userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
    @Override public UserGroupInformation run() throws IOException {
      return UserGroupInformation.getCurrentUser();
    }
  }
  );
  assertEquals(curUGI,userGroupInfo);
  assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Proxy UGIs sharing a subject are equal; a plain remote-user UGI with
 * the same name is not equal to a proxy UGI.
 */
@Test(timeout=30000) public void testEqualsWithRealUser() throws Exception {
  UserGroupInformation realUser = UserGroupInformation.createUserForTesting("RealUser", GROUP_NAMES);
  UserGroupInformation proxyA = UserGroupInformation.createProxyUser(USER_NAME, realUser);
  // Wrapping the same subject must yield an equal UGI.
  UserGroupInformation proxyB = new UserGroupInformation(proxyA.getSubject());
  UserGroupInformation remote = UserGroupInformation.createRemoteUser(USER_NAME);
  assertEquals(proxyA, proxyB);
  assertFalse(remote.equals(proxyA));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * createRemoteUser defaults to SIMPLE auth; the two-argument overload
 * records the supplied auth method, and both appear in toString().
 */
@Test(timeout=30000) public void testCreateRemoteUser(){
  UserGroupInformation remote = UserGroupInformation.createRemoteUser("user1");
  assertEquals(AuthenticationMethod.SIMPLE, remote.getAuthenticationMethod());
  assertTrue(remote.toString().contains("(auth:SIMPLE)"));
  remote = UserGroupInformation.createRemoteUser("user1", AuthMethod.KERBEROS);
  assertEquals(AuthenticationMethod.KERBEROS, remote.getAuthenticationMethod());
  assertTrue(remote.toString().contains("(auth:KERBEROS)"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testTokenIdentifiers() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"});
TokenIdentifier t1=mock(TokenIdentifier.class);
TokenIdentifier t2=mock(TokenIdentifier.class);
ugi.addTokenIdentifier(t1);
ugi.addTokenIdentifier(t2);
Collection z=ugi.getTokenIdentifiers();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2,z.size());
Collection otherSet=ugi.doAs(new PrivilegedExceptionAction>(){
@Override public Collection run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokenIdentifiers();
}
}
);
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
assertEquals(2,otherSet.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Two UGIs wrapping the same Subject must share the same LoginContext
 * instance (identity, not just equality).
 */
@Test(timeout=30000) public void testLoginObjectInSubject() throws Exception {
  UserGroupInformation loginUgi=UserGroupInformation.getLoginUser();
  UserGroupInformation anotherUgi=new UserGroupInformation(loginUgi.getSubject());
  LoginContext login1=loginUgi.getSubject().getPrincipals(User.class).iterator().next().getLogin();
  LoginContext login2=anotherUgi.getSubject().getPrincipals(User.class).iterator().next().getLogin();
  // assertSame expresses the identity check directly and reports both
  // objects on failure, unlike assertTrue(login1 == login2).
  Assert.assertSame(login1,login2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * UGI equality is subject-identity based: two UGIs created independently
 * for the same user are not equal (and hash differently), while UGIs
 * sharing a subject are equal with equal hash codes.
 */
@Test(timeout=30000) public void testEquals() throws Exception {
  UserGroupInformation first = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertEquals(first, first);
  UserGroupInformation second = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertFalse(first.equals(second));
  assertFalse(first.hashCode() == second.hashCode());
  UserGroupInformation sharedSubject = new UserGroupInformation(first.getSubject());
  assertEquals(first, sharedSubject);
  assertEquals(first.hashCode(), sharedSubject.hashCode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unknown credential-provider scheme must make the shell exit with 1
 * and report that no valid CredentialProviders are configured.
 */
@Test public void testInvalidProvider() throws Exception {
  String[] args = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "sdff://file/tmp/credstore.jceks"};
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  int rc = shell.run(args);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates and deletes a credential with the value read interactively:
 * the mock password reader supplies the password and its confirmation.
 */
@Test public void testPromptForCredential() throws Exception {
  String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
  // Restored generic type (it appears stripped in this file).
  // Password followed by matching confirmation.
  ArrayList<String> passwords=new ArrayList<String>();
  passwords.add("p@ssw0rd");
  passwords.add("p@ssw0rd");
  int rc=0;
  CredentialShell shell=new CredentialShell();
  shell.setConf(new Configuration());
  shell.setPasswordReader(new MockPasswordReader(passwords));
  rc=shell.run(args1);
  assertEquals(0,rc);
  assertTrue(outContent.toString().contains("credential1 has been successfully " + "created."));
  String[] args2={"delete","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
  rc=shell.run(args2);
  assertEquals(0,rc);
  assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With only the transient user:/// provider configured, create must fail
 * with exit code 1 and report that no valid providers exist.
 */
@Test public void testTransientProviderOnlyConfig() throws Exception {
  String[] args = {"create", "credential1"};
  CredentialShell shell = new CredentialShell();
  Configuration shellConf = new Configuration();
  shellConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
  shell.setConf(shellConf);
  int rc = shell.run(args);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Full credential lifecycle against a jceks keystore provider:
 * create, list (present), delete, list (absent).
 */
@Test public void testCredentialSuccessfulLifecycle() throws Exception {
  outContent.reset();
  final String provider = "jceks://file" + tmpDir + "/credstore.jceks";
  String[] createArgs = {"create", "credential1", "-value", "p@ssw0rd", "-provider", provider};
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains("credential1 has been successfully " + "created."));
  outContent.reset();
  String[] listArgs = {"list", "-provider", provider};
  rc = shell.run(listArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains("credential1"));
  outContent.reset();
  String[] deleteArgs = {"delete", "credential1", "-provider", provider};
  rc = shell.run(deleteArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
  outContent.reset();
  // After deletion, the credential must no longer be listed.
  String[] listAgainArgs = {"list", "-provider", provider};
  rc = shell.run(listAgainArgs);
  assertEquals(0, rc);
  assertFalse(outContent.toString(), outContent.toString().contains("credential1"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Modifying the transient user:/// provider succeeds but prints a warning
 * about the provider being transient.
 */
@Test public void testTransientProviderWarning() throws Exception {
  String[] createArgs = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "user:///"};
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider."));
  String[] deleteArgs = {"delete", "credential1", "-provider", "user:///"};
  rc = shell.run(deleteArgs);
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A null first password (simulating an aborted prompt) followed by a
 * real one must be rejected as mismatched passwords with exit code 1.
 */
@Test public void testPromptForCredentialWithEmptyPasswd() throws Exception {
  String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
  // Restored generic type (it appears stripped in this file).
  ArrayList<String> passwords=new ArrayList<String>();
  passwords.add(null);
  passwords.add("p@ssw0rd");
  int rc=0;
  CredentialShell shell=new CredentialShell();
  shell.setConf(new Configuration());
  shell.setPasswordReader(new MockPasswordReader(passwords));
  rc=shell.run(args1);
  assertEquals(outContent.toString(),1,rc);
  assertTrue(outContent.toString().contains("Passwords don't match"));
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unregistered provider scheme must surface as an IOException with a
 * descriptive message from CredentialProviderFactory.getProviders.
 */
@Test public void testFactoryErrors() throws Exception {
  Configuration conf=new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,"unknown:///");
  try {
    // The return value is irrelevant; the call itself must throw.
    CredentialProviderFactory.getProviders(conf);
    fail("should throw!");
  }
  catch ( IOException e) {
    assertEquals("No CredentialProviderFactory for unknown:/// in " + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,e.getMessage());
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A syntactically-invalid provider URI must surface as an IOException
 * identifying the bad configuration value.
 */
@Test public void testUriErrors() throws Exception {
  Configuration conf=new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,"unkn@own:/x/y");
  try {
    // The return value is irrelevant; the call itself must throw.
    CredentialProviderFactory.getProviders(conf);
    fail("should throw!");
  }
  catch ( IOException e) {
    assertEquals("Bad configuration of " + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH + " at unkn@own:/x/y",e.getMessage());
  }
}
InternalCallVerifier BooleanVerifier
/**
 * The JavaKeyStoreProvider must create its keystore file with mode
 * rwx------ and retain externally-set permissions across provider use.
 */
@Test public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
  File keystoreFile = new File(tmpDir, "test.jks");
  // Start from a clean slate; a failed delete just means it didn't exist.
  keystoreFile.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path keystorePath = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = keystorePath.getFileSystem(conf);
  FileStatus status = fs.getFileStatus(keystorePath);
  assertTrue(status.getPermission().toString().equals("rwx------"));
  assertTrue(keystoreFile + " should exist", keystoreFile.isFile());
  fs.setPermission(keystorePath, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, keystorePath);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testExtractTokenFail() throws Exception {
HttpURLConnection conn=Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_UNAUTHORIZED);
String tokenStr="foo";
Map> headers=new HashMap>();
List cookies=new ArrayList();
cookies.add(AuthenticatedURL.AUTH_COOKIE + "=" + tokenStr);
headers.put("Set-Cookie",cookies);
Mockito.when(conn.getHeaderFields()).thenReturn(headers);
AuthenticatedURL.Token token=new AuthenticatedURL.Token();
token.set("bar");
try {
AuthenticatedURL.extractToken(conn,token);
Assert.fail();
}
catch ( AuthenticationException ex) {
Assert.assertFalse(token.isSet());
}
catch ( Exception ex) {
Assert.fail();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A default-constructed Token is unset; a Token built from a string is
 * set and round-trips through toString().
 */
@Test public void testToken() throws Exception {
  AuthenticatedURL.Token emptyToken = new AuthenticatedURL.Token();
  Assert.assertFalse(emptyToken.isSet());
  AuthenticatedURL.Token namedToken = new AuthenticatedURL.Token("foo");
  Assert.assertTrue(namedToken.isSet());
  Assert.assertEquals("foo", namedToken.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unauthenticated request must receive 401 Unauthorized together with
 * a WWW-Authenticate challenge header.
 */
@Test(timeout=60000) public void testNotAuthenticated() throws Exception {
  AuthenticatorTestCase auth=new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
  auth.start();
  try {
    URL url=new URL(auth.getBaseURL());
    HttpURLConnection conn=(HttpURLConnection)url.openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
    // assertNotNull gives a clearer failure than assertTrue(x != null).
    Assert.assertNotNull(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE));
  }
  finally {
    auth.stop();
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A signed but expired AuthenticationToken presented as a cookie must
 * make AuthenticationFilter.getToken throw "AuthenticationToken expired".
 */
@Test public void testGetTokenExpired() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
try {
// Build a mock filter config using the dummy handler and a fixed secret.
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
// Token whose expiry lies in the past, signed with the same secret.
AuthenticationToken token=new AuthenticationToken("u","p",DummyAuthenticationHandler.TYPE);
token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
Signer signer=new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned=signer.sign(token.toString());
Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
boolean failed=false;
try {
filter.getToken(request);
}
catch ( AuthenticationException ex) {
Assert.assertEquals("AuthenticationToken expired",ex.getMessage());
failed=true;
}
finally {
// Asserting in finally guarantees the flag is checked even if getToken
// returned normally (which would be the failure case here).
Assert.assertTrue("token not expired",failed);
}
}
finally {
filter.destroy();
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises AuthenticationFilter.init across six configurations:
 * random secret, explicit secret, external SignerSecretProvider,
 * cookie domain/path, a custom handler's init/destroy callbacks, and
 * the kerberos handler shorthand.
 */
@Test public void testInit() throws Exception {
// Scenario 1: simple auth, no secret configured -> random secret,
// non-custom provider, null cookie domain/path, configured validity.
AuthenticationFilter filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn((new Long(TOKEN_VALIDITY_SEC)).toString());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertEquals(PseudoAuthenticationHandler.class,filter.getAuthenticationHandler().getClass());
Assert.assertTrue(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
Assert.assertNull(filter.getCookieDomain());
Assert.assertNull(filter.getCookiePath());
Assert.assertEquals(TOKEN_VALIDITY_SEC,filter.getValidity());
}
finally {
filter.destroy();
}
// Scenario 2: explicit signature secret -> not random, not custom.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
}
finally {
filter.destroy();
}
// Scenario 3: an externally-supplied SignerSecretProvider in the servlet
// context takes precedence -> custom provider, not random.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(new SignerSecretProvider(){
@Override public void init( Properties config, long tokenValidity){
}
@Override public byte[] getCurrentSecret(){
return null;
}
@Override public byte[][] getAllSecrets(){
return null;
}
}
);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertTrue(filter.isCustomSignerSecretProvider());
}
finally {
filter.destroy();
}
// Scenario 4: cookie domain and path init parameters are picked up.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.COOKIE_DOMAIN,AuthenticationFilter.COOKIE_PATH)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertEquals(".foo.com",filter.getCookieDomain());
Assert.assertEquals("/bar",filter.getCookiePath());
}
finally {
filter.destroy();
}
// Scenario 5: a custom handler class name -> its init() runs during
// filter.init and its destroy() runs during filter.destroy.
DummyAuthenticationHandler.reset();
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertTrue(DummyAuthenticationHandler.init);
}
finally {
filter.destroy();
Assert.assertTrue(DummyAuthenticationHandler.destroy);
}
// Scenario 6: the "kerberos" shorthand resolves to the kerberos handler
// even though init may fail (no keytab in the test environment).
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
}
catch ( ServletException ex) {
// expected when kerberos cannot actually initialize here
}
finally {
Assert.assertEquals(KerberosAuthenticationHandler.class,filter.getAuthenticationHandler().getClass());
filter.destroy();
}
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getToken() must reject a correctly signed, unexpired cookie whose token
 * type does not match the filter's configured authentication handler.
 */
@Test public void testGetTokenInvalidType() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
try {
// Configure the filter with the dummy handler and a fixed signature secret.
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
// Build a validly signed token whose type ("invalidtype") is deliberately wrong.
AuthenticationToken token=new AuthenticationToken("u","p","invalidtype");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
Signer signer=new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned=signer.sign(token.toString());
Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
boolean failed=false;
try {
filter.getToken(request);
}
catch ( AuthenticationException ex) {
// Expected: the signature verifies but the token type does not match.
Assert.assertEquals("Invalid AuthenticationToken type",ex.getMessage());
failed=true;
}
finally {
// The flag guarantees the exception path was actually taken.
Assert.assertTrue("token not invalid type",failed);
}
}
finally {
filter.destroy();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Checks the AuthenticationToken accessors and the time-based expiry flag. */
@Test public void testGetters() throws Exception {
  long expiry = System.currentTimeMillis() + 50;
  AuthenticationToken authToken = new AuthenticationToken("u", "p", "t");
  authToken.setExpires(expiry);
  Assert.assertEquals("u", authToken.getUserName());
  Assert.assertEquals("p", authToken.getName());
  Assert.assertEquals("t", authToken.getType());
  Assert.assertEquals(expiry, authToken.getExpires());
  Assert.assertFalse(authToken.isExpired());
  // Wait past the expiry instant, then the token must report itself expired.
  Thread.sleep(70);
  Assert.assertTrue(authToken.isExpired());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Serializes a token via toString() and parses it back, verifying fields survive. */
@Test public void testToStringAndParse() throws Exception {
  long expiry = System.currentTimeMillis() + 50;
  AuthenticationToken original = new AuthenticationToken("u", "p", "t");
  original.setExpires(expiry);
  String serialized = original.toString();
  AuthenticationToken parsed = AuthenticationToken.parse(serialized);
  Assert.assertEquals("p", parsed.getName());
  Assert.assertEquals("t", parsed.getType());
  Assert.assertEquals(expiry, parsed.getExpires());
  Assert.assertFalse(parsed.isExpired());
  // After the expiry instant passes the parsed token must report expired.
  Thread.sleep(70);
  Assert.assertTrue(parsed.isExpired());
}
BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the ANONYMOUS singleton token: no user, name or type, and it
 * never expires.
 */
@Test public void testAnonymous(){
  Assert.assertNotNull(AuthenticationToken.ANONYMOUS);
  // assertNull is the intended assertion; assertEquals(null, x) is ambiguous
  // between overloads and deprecated in newer JUnit releases.
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getUserName());
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getName());
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getType());
  Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
  Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** The handler must expose the configured keytab and exactly one server principal. */
@Test(timeout=60000) public void testInit() throws Exception {
  Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
  Principal serverPrincipal = new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
  Set configuredPrincipals = handler.getPrincipals();
  Assert.assertEquals(1, configuredPrincipals.size());
  Assert.assertTrue(configuredPrincipals.contains(serverPrincipal));
}
APIUtilityVerifier BranchVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getPrincipalNames(keytab, pattern) must return exactly the principals in
 * the keytab whose names match the supplied regex pattern.
 */
@Test public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
  createKeyTab(testKeytab, testPrincipals);
  Pattern httpPattern = Pattern.compile("HTTP/.*");
  String[] httpPrincipals = KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
  Assert.assertNotNull("principals cannot be null", httpPrincipals);
  int expectedSize = 0;
  // Parameterized list instead of a raw type; avoids unchecked warnings.
  List<String> httpPrincipalList = Arrays.asList(httpPrincipals);
  for (String principal : testPrincipals) {
    if (httpPattern.matcher(principal).matches()) {
      Assert.assertTrue("missing principal " + principal, httpPrincipalList.contains(principal));
      expectedSize++;
    }
  }
  // No extra, non-matching principals may have been returned.
  Assert.assertEquals(expectedSize, httpPrincipals.length);
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Without a filter pattern, getPrincipalNames must return every principal
 * that was written into the keytab — no more, no fewer.
 */
@Test public void testGetPrincipalNamesFromKeytab() throws IOException {
  createKeyTab(testKeytab, testPrincipals);
  String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
  Assert.assertNotNull("principals cannot be null", principals);
  int expectedSize = 0;
  // Parameterized list instead of a raw type; avoids unchecked warnings.
  List<String> principalList = Arrays.asList(principals);
  for (String principal : testPrincipals) {
    Assert.assertTrue("missing principal " + principal, principalList.contains(principal));
    expectedSize++;
  }
  Assert.assertEquals(expectedSize, principals.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test addUser/Group and removeUser/Group api.
 */
@Test public void testAddRemoveAPI(){
  AccessControlList acl;
  Collection users;
  Collection groups;
  // An ACL spec of a single space allows nobody.
  acl = new AccessControlList(" ");
  assertEquals(0, acl.getUsers().size());
  assertEquals(0, acl.getGroups().size());
  assertEquals(" ", acl.getAclString());
  acl.addUser("drwho");
  users = acl.getUsers();
  // JUnit's assertEquals takes (expected, actual); the original swapped them
  // in many places, producing misleading failure messages.
  assertEquals(1, users.size());
  assertEquals("drwho", users.iterator().next());
  assertEquals("drwho ", acl.getAclString());
  acl.addGroup("tardis");
  groups = acl.getGroups();
  assertEquals(1, groups.size());
  assertEquals("tardis", groups.iterator().next());
  assertEquals("drwho tardis", acl.getAclString());
  acl.addUser("joe");
  acl.addGroup("users");
  users = acl.getUsers();
  assertEquals(2, users.size());
  Iterator iter = users.iterator();
  assertEquals("drwho", iter.next());
  assertEquals("joe", iter.next());
  groups = acl.getGroups();
  assertEquals(2, groups.size());
  iter = groups.iterator();
  assertEquals("tardis", iter.next());
  assertEquals("users", iter.next());
  assertEquals("drwho,joe tardis,users", acl.getAclString());
  acl.removeUser("joe");
  acl.removeGroup("users");
  users = acl.getUsers();
  assertEquals(1, users.size());
  assertFalse(users.contains("joe"));
  groups = acl.getGroups();
  assertEquals(1, groups.size());
  assertFalse(groups.contains("users"));
  assertEquals("drwho tardis", acl.getAclString());
  acl.removeGroup("tardis");
  groups = acl.getGroups();
  assertEquals(0, groups.size());
  assertFalse(groups.contains("tardis"));
  assertEquals("drwho ", acl.getAclString());
  acl.removeUser("drwho");
  // Re-fetch the collection after the removal rather than relying on the
  // previously obtained reference being a live view of the ACL's state.
  users = acl.getUsers();
  assertEquals(0, users.size());
  assertFalse(users.contains("drwho"));
  assertEquals(0, acl.getGroups().size());
  assertEquals(0, acl.getUsers().size());
  assertEquals(" ", acl.getAclString());
}
BooleanVerifier
/**
 * Verifies the human-readable toString() form for each flavour of ACL:
 * wildcard, empty, users-only, groups-only, and users+groups.
 */
@Test public void testAclString(){
  AccessControlList acl;
  acl = new AccessControlList("*");
  // assertEquals reports both strings on failure, unlike assertTrue(a.equals(b)).
  assertEquals("All users are allowed", acl.toString());
  validateGetAclString(acl);
  acl = new AccessControlList(" ");
  assertEquals("No users are allowed", acl.toString());
  acl = new AccessControlList("user1,user2");
  assertEquals("Users [user1, user2] are allowed", acl.toString());
  validateGetAclString(acl);
  acl = new AccessControlList("user1,user2 ");
  assertEquals("Users [user1, user2] are allowed", acl.toString());
  validateGetAclString(acl);
  acl = new AccessControlList(" group1,group2");
  assertEquals("Members of the groups [group1, group2] are allowed", acl.toString());
  validateGetAclString(acl);
  acl = new AccessControlList("user1,user2 group1,group2");
  assertEquals("Users [user1, user2] and members of the groups [group1, group2] are allowed",
      acl.toString());
  validateGetAclString(acl);
}
BooleanVerifier
/** An ACL spec of "*" — with any surrounding whitespace — allows everyone. */
@Test public void testWildCardAccessControlList() throws Exception {
  String[] wildcardSpecs = {"*", " * ", " *", "* "};
  for (String spec : wildcardSpecs) {
    AccessControlList acl = new AccessControlList(spec);
    assertTrue(acl.isAllAllowed());
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that add/remove of users and groups on a wildcard ACL are no-ops:
 * the ACL keeps allowing everyone and its string form is unchanged.
 */
@Test public void testAddRemoveToWildCardACL(){
  AccessControlList wildcardAcl = new AccessControlList(" * ");
  assertTrue(wildcardAcl.isAllAllowed());
  UserGroupInformation alienUser =
      UserGroupInformation.createUserForTesting("drwho@EXAMPLE.COM", new String[]{"aliens"});
  UserGroupInformation tardisUser =
      UserGroupInformation.createUserForTesting("drwho2@EXAMPLE.COM", new String[]{"tardis"});
  // Adding a user has no effect: everyone is already allowed.
  wildcardAcl.addUser("drwho");
  assertTrue(wildcardAcl.isAllAllowed());
  assertFalse(wildcardAcl.getAclString().contains("drwho"));
  // Likewise for groups.
  wildcardAcl.addGroup("tardis");
  assertTrue(wildcardAcl.isAllAllowed());
  assertFalse(wildcardAcl.getAclString().contains("tardis"));
  // Removals are equally ignored; any user remains allowed.
  wildcardAcl.removeUser("drwho");
  assertTrue(wildcardAcl.isAllAllowed());
  assertUserAllowed(alienUser, wildcardAcl);
  wildcardAcl.removeGroup("tardis");
  assertTrue(wildcardAcl.isAllAllowed());
  assertUserAllowed(tardisUser, wildcardAcl);
}
BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests adding/removing wild card as the user/group: passing "*" (even
 * whitespace-padded) to any add/remove method must be rejected with an
 * IllegalArgumentException.
 */
@Test public void testAddRemoveWildCard(){
  final AccessControlList acl = new AccessControlList("drwho tardis");
  assertRejectsWildcard(new Runnable(){
    public void run(){ acl.addUser(" * "); }
  });
  assertRejectsWildcard(new Runnable(){
    public void run(){ acl.addGroup(" * "); }
  });
  assertRejectsWildcard(new Runnable(){
    public void run(){ acl.removeUser(" * "); }
  });
  assertRejectsWildcard(new Runnable(){
    public void run(){ acl.removeGroup(" * "); }
  });
}

/** Runs {@code op} and asserts it throws IllegalArgumentException. */
private static void assertRejectsWildcard(Runnable op){
  Throwable thrown = null;
  try {
    op.run();
  }
  catch (Throwable t) {
    thrown = t;
  }
  assertNotNull("expected an exception", thrown);
  assertTrue("expected IllegalArgumentException but got " + thrown,
      thrown instanceof IllegalArgumentException);
}
BooleanVerifier
/** ProxyServers recognizes only the addresses configured via CONF_HADOOP_PROXYSERVERS. */
@Test public void testProxyServer(){
  Configuration configuration = new Configuration();
  // Nothing configured yet: no address is considered a proxy server.
  assertFalse(ProxyServers.isProxyServer("1.1.1.1"));
  configuration.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "2.2.2.2, 3.3.3.3");
  ProxyUsers.refreshSuperUserGroupsConfiguration(configuration);
  // After the refresh, exactly the two configured addresses are recognized.
  assertFalse(ProxyServers.isProxyServer("1.1.1.1"));
  assertTrue(ProxyServers.isProxyServer("2.2.2.2"));
  assertTrue(ProxyServers.isProxyServer("3.3.3.3"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stress test: many threads concurrently request delegation tokens; every
 * issued token must afterwards be present in the cache, have a stored key,
 * and a password that verifies.
 */
@Test public void testParallelDelegationTokenCreation() throws Exception {
  final TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(2000, 24 * 60 * 60 * 1000,
          7 * 24 * 60 * 60 * 1000, 2000);
  try {
    dtSecretManager.startThreads();
    int numThreads = 100;
    final int numTokensPerThread = 100;
    // Each issuer requests numTokensPerThread tokens, pausing briefly between requests.
    class tokenIssuerThread implements Runnable {
      @Override public void run(){
        for (int i = 0; i < numTokensPerThread; i++) {
          generateDelegationToken(dtSecretManager, "auser", "arenewer");
          try {
            Thread.sleep(250);
          }
          catch (Exception e) {
          }
        }
      }
    }
    Thread[] issuers = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      issuers[i] = new Daemon(new tokenIssuerThread());
      issuers[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
      issuers[i].join();
    }
    // Parameterized types: the original raw Map/Iterator made the assignment
    // of iter.next() to TestDelegationTokenIdentifier a compile error.
    Map<TestDelegationTokenIdentifier, DelegationTokenInformation> tokenCache =
        dtSecretManager.getAllTokens();
    Assert.assertEquals(numTokensPerThread * numThreads, tokenCache.size());
    Iterator<TestDelegationTokenIdentifier> iter = tokenCache.keySet().iterator();
    while (iter.hasNext()) {
      TestDelegationTokenIdentifier id = iter.next();
      DelegationTokenInformation info = tokenCache.get(id);
      Assert.assertNotNull(info);
      DelegationKey key = dtSecretManager.getKey(id);
      Assert.assertNotNull(key);
      byte[] storedPassword = dtSecretManager.retrievePassword(id);
      byte[] password = dtSecretManager.createPassword(id, key);
      // The recomputed password must match the stored one byte-for-byte.
      Assert.assertArrayEquals(password, storedPassword);
      dtSecretManager.verifyToken(id, password);
    }
  }
  finally {
    dtSecretManager.stopThreads();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Cancelling a delegation token must invoke the store-removal hook and make
 * subsequent renewals fail; renewal by a non-renewer is rejected up front.
 */
@Test public void testCancelDelegationToken() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,10 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker");
// Only the designated renewer ("JobTracker") may renew this token.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"FakeCanceller");
return null;
}
}
,AccessControlException.class);
dtSecretManager.cancelToken(token,"JobTracker");
// Cancellation must have removed the token from the backing store.
Assert.assertTrue(dtSecretManager.isRemoveStoredTokenCalled);
// After cancellation even the legitimate renewer gets InvalidToken.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"JobTracker");
return null;
}
}
,InvalidToken.class);
}
finally {
dtSecretManager.stopThreads();
}
}
BooleanVerifier
/**
 * A Text field longer than Text.DEFAULT_MAX_LEN must fail the serialization
 * round trip whether it appears as owner, renewer, or realUser.
 */
@Test public void testOverlongDtidSerialization() throws IOException {
  // Java arrays are zero-initialized; the original's explicit fill loop was dead code.
  byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
  assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
      new Text(bigBuf), new Text("renewer"), new Text("realUser")));
  assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
      new Text("owner"), new Text(bigBuf), new Text("realUser")));
  assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
      new Text("owner"), new Text("renewer"), new Text(bigBuf)));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Full lifecycle of a delegation token: issue (stored), renew (renewer is
 * enforced), expire, re-renew, and finally exceed the maximum lifetime.
 */
@Test public void testDelegationTokenSecretManager() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,3 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker");
// Issuing a token must persist it through the store hook.
Assert.assertTrue(dtSecretManager.isStoreNewTokenCalled);
// Renewal by anyone other than the designated renewer is rejected.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"FakeRenewer");
return null;
}
}
,AccessControlException.class);
long time=dtSecretManager.renewToken(token,"JobTracker");
// A successful renewal must update the stored token and push expiry forward.
Assert.assertTrue(dtSecretManager.isUpdateStoredTokenCalled);
assertTrue("renew time is in future",time > Time.now());
TestDelegationTokenIdentifier identifier=new TestDelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
LOG.info("Sleep to expire the token");
Thread.sleep(2000);
try {
dtSecretManager.retrievePassword(identifier);
Assert.fail("Token should have expired");
}
catch ( InvalidToken e) {
// expected: the token's renew window has elapsed
}
dtSecretManager.renewToken(token,"JobTracker");
LOG.info("Sleep beyond the max lifetime");
Thread.sleep(2000);
// Once past the maximum lifetime, even the renewer cannot renew.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"JobTracker");
return null;
}
}
,InvalidToken.class);
}
finally {
dtSecretManager.stopThreads();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renewal of a token created without a renewer must fail with IOException.
 */
@Test public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(24 * 60 * 60 * 1000, 10 * 1000,
          1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    TestDelegationTokenIdentifier dtId =
        new TestDelegationTokenIdentifier(new Text("theuser"), null, null);
    Token token = new Token(dtId, dtSecretManager);
    Assert.assertNotNull(token);
    try {
      dtSecretManager.renewToken(token, "");
      Assert.fail("Renewal must not succeed");
    }
    catch (IOException e) {
      // expected: the token has no renewer
    }
  }
  finally {
    // The original leaked the manager's background threads into other tests.
    dtSecretManager.stopThreads();
  }
}
BooleanVerifier
/** Normal-sized owner/renewer/realUser values must round-trip successfully. */
@Test public void testSimpleDtidSerialization() throws IOException {
  String[][] fieldTriples = {
      {"owner", "renewer", "realUser"},
      {"", "", ""},
      {"", "b", ""},
  };
  for (String[] fields : fieldTriples) {
    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
        new Text(fields[0]), new Text(fields[1]), new Text(fields[2])));
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * DelegationKey equality: keys with the same id/expiry/bytes are equal (and
 * per the equals/hashCode contract must hash identically); differing key ids
 * make keys unequal. The original never asserted hashCode despite the name.
 */
@Test public void testDelegationKeyEqualAndHash(){
  DelegationKey key1 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
  DelegationKey key2 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
  DelegationKey key3 = new DelegationKey(3333, 2222, "keyBytes".getBytes());
  Assert.assertEquals(key1, key2);
  // Equal objects must produce equal hash codes.
  Assert.assertEquals(key1.hashCode(), key2.hashCode());
  Assert.assertFalse(key2.equals(key3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rolling the master key must persist the new key, keep previously issued
 * tokens valid (same password), and eventually purge expired master keys.
 */
@Test(timeout=10000) public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    Token token = generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
    // Rolling must have added at least one key.
    int currNumKeys = dtSecretManager.getAllKeys().length;
    Assert.assertTrue((currNumKeys - prevNumKeys) >= 1);
    ByteArrayInputStream bi = new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier = dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd = dtSecretManager.retrievePassword(identifier);
    // Compare array CONTENTS: assertEquals on byte[] only compares references
    // and would fail even for identical passwords.
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // Wait until the background thread removes an expired master key.
    while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  }
  finally {
    dtSecretManager.stopThreads();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * End-to-end DelegationTokenManager lifecycle: create, verify, renew and
 * cancel a token; verification after cancellation must fail with IOException.
 */
@Test public void testDTManager() throws Exception {
DelegationTokenManager tm=new DelegationTokenManager(new Text("foo"),DAY_IN_SECS,DAY_IN_SECS,DAY_IN_SECS,DAY_IN_SECS);
tm.init();
Token token=tm.createToken(UserGroupInformation.getCurrentUser(),"foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
// Renewal must push the expiry time into the future.
Assert.assertTrue(tm.renewToken(token,"foo") > System.currentTimeMillis());
tm.cancelToken(token,"foo");
try {
tm.verifyToken(token);
Assert.fail();
}
catch ( IOException ex) {
// expected: the token was cancelled
}
catch ( Exception ex) {
// any other exception type is a test failure
Assert.fail();
}
tm.destroy();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises getDelegationToken/renewDelegationToken/cancelDelegationToken
 * against an embedded Jetty endpoint: unauthenticated calls must get 401,
 * a non-renewer gets 403, and a cancelled token can be obtained again.
 */
@Test public void testDelegationTokenAuthenticatorCalls() throws Exception {
  final Server jetty = createJettyServer();
  Context context = new Context();
  context.setContextPath("/foo");
  jetty.setHandler(context);
  context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
  context.addServlet(new ServletHolder(PingServlet.class), "/bar");
  try {
    jetty.start();
    URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
    URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
    URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
    DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
    // Requesting a token without authenticating must fail with 401.
    try {
      aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
      Assert.fail();
    }
    catch (Exception ex) {
      Assert.assertTrue(ex.getMessage().contains("401"));
    }
    aUrl.getDelegationToken(authURL, token, FOO_USER);
    Assert.assertNotNull(token.getDelegationToken());
    Assert.assertEquals(new Text("token-kind"), token.getDelegationToken().getKind());
    aUrl.renewDelegationToken(authURL, token);
    // Renewing over an unauthenticated URL must also fail with 401.
    try {
      aUrl.renewDelegationToken(nonAuthURL, token);
      Assert.fail();
    }
    catch (Exception ex) {
      Assert.assertTrue(ex.getMessage().contains("401"));
    }
    aUrl.getDelegationToken(authURL, token, FOO_USER);
    // A different authenticated user is not the renewer: expect 403.
    try {
      aUrl.renewDelegationToken(authURL2, token);
      Assert.fail();
    }
    catch (Exception ex) {
      Assert.assertTrue(ex.getMessage().contains("403"));
    }
    aUrl.getDelegationToken(authURL, token, FOO_USER);
    aUrl.cancelDelegationToken(authURL, token);
    aUrl.getDelegationToken(authURL, token, FOO_USER);
    aUrl.cancelDelegationToken(nonAuthURL, token);
    aUrl.getDelegationToken(authURL, token, FOO_USER);
    try {
      aUrl.renewDelegationToken(nonAuthURL, token);
      // BUGFIX: the original omitted this fail(), so a renewal that wrongly
      // succeeded over the unauthenticated URL would have gone unnoticed.
      Assert.fail();
    }
    catch (Exception ex) {
      Assert.assertTrue(ex.getMessage().contains("401"));
    }
  }
  finally {
    jetty.stop();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * End-to-end Kerberos delegation-token flow against an embedded Jetty server
 * backed by a MiniKdc: an un-kerberized request fails with a GSSException,
 * while a kerberized client can get, renew and cancel tokens.
 */
@Test public void testKerberosDelegationTokenAuthenticator() throws Exception {
org.apache.hadoop.conf.Configuration conf=new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir=new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc=new MiniKdc(MiniKdc.createConf(),testDir);
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(KDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UserServlet.class),"/bar");
try {
kdc.start();
File keytabFile=new File(testDir,"test.keytab");
// A single keytab holds both the client and the HTTP server principals.
kdc.createPrincipal(keytabFile,"client","HTTP/localhost");
KDTAFilter.keytabFile=keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
final URL url=new URL(getJettyURL() + "/foo/bar");
// Without Kerberos credentials the request must fail during negotiation.
try {
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.fail();
}
catch ( AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
// Repeat the calls while logged in from the keytab as "client".
doAsKerberosUser("client",keytabFile.getAbsolutePath(),new Callable(){
@Override public Void call() throws Exception {
aUrl.getDelegationToken(url,token,"client");
Assert.assertNotNull(token.getDelegationToken());
aUrl.renewDelegationToken(url,token);
Assert.assertNotNull(token.getDelegationToken());
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
// FOO_USER is not the renewer of this token: renewal must return 403.
try {
aUrl.renewDelegationToken(url,token);
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(url,token,FOO_USER);
// Cancelling clears the token from the client-side holder.
aUrl.cancelDelegationToken(url,token);
Assert.assertNull(token.getDelegationToken());
return null;
}
}
);
}
finally {
jetty.stop();
kdc.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * removeService() must detach a previously added child, and addIfService()
 * must reject objects that are not Services.
 */
@Test public void testRemoveService(){
  CompositeService testService = new CompositeService("TestService"){
    @Override public void serviceInit(Configuration conf){
      // Integer.valueOf replaces the deprecated boxing constructor new Integer(0).
      Integer notAService = Integer.valueOf(0);
      assertFalse("Added an integer as a service", addIfService(notAService));
      Service service1 = new AbstractService("Service1"){
      };
      addIfService(service1);
      Service service2 = new AbstractService("Service2"){
      };
      addIfService(service2);
      Service service3 = new AbstractService("Service3"){
      };
      addIfService(service3);
      // Removing one of the three added services should leave two behind.
      removeService(service1);
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 2, testService.getServices().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * addIfService() must reject non-Service objects and accept Services, which
 * are then tracked as children of the composite.
 */
@Test(timeout=1000) public void testAddIfService(){
  CompositeService testService = new CompositeService("TestService"){
    Service service;
    @Override public void serviceInit(Configuration conf){
      // Integer.valueOf replaces the deprecated boxing constructor new Integer(0).
      Integer notAService = Integer.valueOf(0);
      assertFalse("Added an integer as a service", addIfService(notAService));
      service = new AbstractService("Service"){
      };
      assertTrue("Unable to add a service", addIfService(service));
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 1, testService.getServices().size());
}
BooleanVerifier
/**
 * Test that register/unregister works.
 */
@Test public void testRegisterListener(){
  register();
  boolean wasRegistered = unregister();
  assertTrue("listener not registered", wasRegistered);
}
BooleanVerifier
/**
 * Test that double registration results in one registration only.
 */
@Test public void testRegisterListenerTwice(){
  register();
  register();
  // Only the first unregister should find a registration to remove.
  assertTrue("listener not registered", unregister());
  assertFalse("listener double registered", unregister());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test verifies that you can block waiting for something to happen
 * and use notifications to manage it
 * @throws Throwable on a failure
 */
@Test public void testListenerWithNotifications() throws Throwable {
// Service that terminates itself asynchronously after ~2 seconds.
AsyncSelfTerminatingService service=new AsyncSelfTerminatingService(2000);
NotifyingListener listener=new NotifyingListener();
service.registerServiceListener(listener);
service.init(new Configuration());
service.start();
assertServiceInState(service,Service.STATE.STARTED);
long start=System.currentTimeMillis();
// Block until the listener is notified of a state change (20s safety cap).
synchronized (listener) {
listener.wait(20000);
}
long duration=System.currentTimeMillis() - start;
// The notification must report STOPPED, and arrive well before the cap.
assertEquals(Service.STATE.STOPPED,listener.notifyingState);
assertServiceInState(service,Service.STATE.STOPPED);
assertTrue("Duration of " + duration + " too long",duration < 10000);
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AutoInputFormat must transparently read both a plain text file and a
 * SequenceFile placed in the same input directory, selecting the matching
 * record reader (and hence key/value classes) for each split.
 */
@SuppressWarnings({"unchecked","deprecation"}) @Test public void testFormat() throws IOException {
JobConf job=new JobConf(conf);
FileSystem fs=FileSystem.getLocal(conf);
Path dir=new Path(System.getProperty("test.build.data",".") + "/mapred");
Path txtFile=new Path(dir,"auto.txt");
Path seqFile=new Path(dir,"auto.seq");
fs.delete(dir,true);
FileInputFormat.setInputPaths(job,dir);
// Text input: one multiple-of-10 number per line.
Writer txtWriter=new OutputStreamWriter(fs.create(txtFile));
try {
for (int i=0; i < LINES_COUNT; i++) {
txtWriter.write("" + (10 * i));
txtWriter.write("\n");
}
}
finally {
txtWriter.close();
}
// Sequence-file input: IntWritable keys (multiples of 11), LongWritable values (multiples of 12).
SequenceFile.Writer seqWriter=SequenceFile.createWriter(fs,conf,seqFile,IntWritable.class,LongWritable.class);
try {
for (int i=0; i < RECORDS_COUNT; i++) {
IntWritable key=new IntWritable(11 * i);
LongWritable value=new LongWritable(12 * i);
seqWriter.append(key,value);
}
}
finally {
seqWriter.close();
}
AutoInputFormat format=new AutoInputFormat();
InputSplit[] splits=format.getSplits(job,SPLITS_COUNT);
for ( InputSplit split : splits) {
RecordReader reader=format.getRecordReader(split,job,Reporter.NULL);
Object key=reader.createKey();
Object value=reader.createValue();
try {
while (reader.next(key,value)) {
if (key instanceof LongWritable) {
// Text splits: the value must be the line (a multiple of 10).
assertEquals("Wrong value class.",Text.class,value.getClass());
assertTrue("Invalid value",Integer.parseInt(((Text)value).toString()) % 10 == 0);
}
else {
// Sequence-file splits: classes and multiples as written above.
assertEquals("Wrong key class.",IntWritable.class,key.getClass());
assertEquals("Wrong value class.",LongWritable.class,value.getClass());
assertTrue("Invalid key.",((IntWritable)key).get() % 11 == 0);
assertTrue("Invalid value.",((LongWritable)value).get() % 12 == 0);
}
}
}
finally {
reader.close();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * DumpTypedBytes must dump the contents of an HDFS directory to stdout as
 * typed bytes; stdout is captured and re-parsed as (Long, String) pairs to
 * validate the output.
 */
@Test public void testDumping() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
// Capture stdout so the dumped stream can be inspected; restored in finally.
PrintStream psBackup=System.out;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream psOut=new PrintStream(out);
System.setOut(psOut);
DumpTypedBytes dumptb=new DumpTypedBytes(conf);
try {
Path root=new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
// Write 100 lines, each a multiple of 10.
OutputStreamWriter writer=new OutputStreamWriter(fs.create(new Path(root,"test.txt")));
try {
for (int i=0; i < 100; i++) {
writer.write("" + (10 * i) + "\n");
}
}
finally {
writer.close();
}
String[] args=new String[1];
args[0]="/typedbytestest";
int ret=dumptb.run(args);
assertEquals("Return value != 0.",0,ret);
// Re-parse the captured stdout: each record is a Long key and String value.
ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray());
TypedBytesInput tbinput=new TypedBytesInput(new DataInputStream(in));
int counter=0;
Object key=tbinput.read();
while (key != null) {
assertEquals(Long.class,key.getClass());
Object value=tbinput.read();
assertEquals(String.class,value.getClass());
assertTrue("Invalid output.",Integer.parseInt(value.toString()) % 10 == 0);
counter++;
key=tbinput.read();
}
assertEquals("Wrong number of outputs.",100,counter);
}
finally {
try {
fs.close();
}
catch ( Exception e) {
// best-effort close; cluster.shutdown() below releases the resources
}
System.setOut(psBackup);
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * LoadTypedBytes must read typed-bytes (Long, String) pairs from stdin and
 * write them into a SequenceFile in HDFS; the file is then read back and
 * every record validated.
 */
@Test public void testLoading() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  // Serialize 100 (Long i, String "10*i") pairs into an in-memory buffer.
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(out));
  for (int i = 0; i < 100; i++) {
    // Long.valueOf replaces the deprecated boxing constructor new Long(i).
    tboutput.write(Long.valueOf(i));
    tboutput.write("" + (10 * i));
  }
  // Redirect stdin so LoadTypedBytes reads the buffer; restored in finally.
  InputStream isBackup = System.in;
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  System.setIn(in);
  LoadTypedBytes loadtb = new LoadTypedBytes(conf);
  try {
    Path root = new Path("/typedbytestest");
    assertTrue(fs.mkdirs(root));
    assertTrue(fs.exists(root));
    String[] args = new String[1];
    args[0] = "/typedbytestest/test.seq";
    int ret = loadtb.run(args);
    assertEquals("Return value != 0.", 0, ret);
    Path file = new Path(root, "test.seq");
    assertTrue(fs.exists(file));
    // Read the SequenceFile back and check every record round-tripped.
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
    try {
      int counter = 0;
      TypedBytesWritable key = new TypedBytesWritable();
      TypedBytesWritable value = new TypedBytesWritable();
      while (reader.next(key, value)) {
        assertEquals(Long.class, key.getValue().getClass());
        assertEquals(String.class, value.getValue().getClass());
        assertTrue("Invalid record.", Integer.parseInt(value.toString()) % 10 == 0);
        counter++;
      }
      assertEquals("Wrong number of records.", 100, counter);
    }
    finally {
      // The original leaked the reader.
      reader.close();
    }
  }
  finally {
    try {
      fs.close();
    }
    catch (Exception e) {
      // best-effort close; cluster.shutdown() below releases the resources
    }
    System.setIn(isBackup);
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/** isLocalJobTracker: the framework name, not the JT address, decides local mode. */
@Test public void testFramework(){
  JobConf jobConf = new JobConf();
  // Local JT address but YARN framework: not local.
  jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", StreamUtil.isLocalJobTracker(jobConf));
  // Local JT address but classic framework: still not local.
  jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", StreamUtil.isLocalJobTracker(jobConf));
  // Remote JT address with the local framework name: reported as local.
  jobConf.set(JTConfig.JT_IPC_ADDRESS, "jthost:9090");
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  assertTrue("Expected 'isLocal' to be true", StreamUtil.isLocalJobTracker(jobConf));
}
TestInitializer BooleanVerifier HybridVerifier
/** Recreates the test directory and clears accumulated CLI args before each test. */
@Before public void setUp() throws IOException {
  UtilTest.recursiveDelete(TEST_DIR);
  boolean created = TEST_DIR.mkdirs();
  assertTrue("Creating " + TEST_DIR, created);
  args.clear();
}
TestInitializer BooleanVerifier HybridVerifier
/**
 * Recreates the test directory and writes the one-line input file.
 */
@Before public void setUp() throws IOException {
  UtilTest.recursiveDelete(TEST_DIR);
  assertTrue(TEST_DIR.mkdirs());
  // try/finally guarantees the stream is closed even if write() throws;
  // the original leaked the descriptor on failure.
  FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
  try {
    out.write("hello\n".getBytes());
  }
  finally {
    out.close();
  }
}
InternalCallVerifier BooleanVerifier
/** Runs the inherited command-line test, then checks the combiner counters moved. */
@Test public void testCommandLine() throws Exception {
  super.testCommandLine();
  String counterGroup = "org.apache.hadoop.mapred.Task$Counter";
  Counters counters = job.running_.getCounters();
  long combineIn = counters.findCounter(counterGroup, "COMBINE_INPUT_RECORDS").getValue();
  long combineOut = counters.findCounter(counterGroup, "COMBINE_OUTPUT_RECORDS").getValue();
  // The combiner must have processed and produced at least one record.
  assertTrue(combineIn != 0);
  assertTrue(combineOut != 0);
}
TestInitializer BooleanVerifier HybridVerifier
/**
 * Recreates the test directory and writes the one-line input file.
 */
@Before public void setUp() throws IOException {
  UtilTest.recursiveDelete(TEST_DIR);
  assertTrue(TEST_DIR.mkdirs());
  // try/finally guarantees the stream is closed even if write() throws;
  // the original leaked the descriptor on failure.
  FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
  try {
    out.write("hello\n".getBytes());
  }
  finally {
    out.close();
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/** Three worker threads each run once; waitFor returns promptly with no errors. */
@Test public void testNoErrors() throws Exception {
  final AtomicInteger threadsRun = new AtomicInteger();
  TestContext testCtx = new TestContext();
  for (int t = 0; t < 3; t++) {
    testCtx.addThread(new TestingThread(testCtx){
      @Override public void doWork() throws Exception {
        threadsRun.incrementAndGet();
      }
    });
  }
  // Nothing runs until startThreads() is called.
  assertEquals(0, threadsRun.get());
  testCtx.startThreads();
  long startMs = Time.now();
  testCtx.waitFor(30000);
  long elapsedMs = Time.now() - startMs;
  assertEquals(3, threadsRun.get());
  assertTrue("Test took " + elapsedMs + "ms", elapsedMs < 5000);
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A worker that throws a checked IOException must cause waitFor() to fail
 * with a RuntimeException whose cause carries the original message.
 */
@Test public void testThreadThrowsCheckedException() throws Exception {
  TestContext ctx = new TestContext();
  ctx.addThread(new TestingThread(ctx) {
    @Override public void doWork() throws Exception {
      throw new IOException("my ioe");
    }
  });
  ctx.startThreads();
  final long started = Time.now();
  boolean sawFailure = false;
  try {
    ctx.waitFor(30000);
  } catch (RuntimeException rte) {
    sawFailure = true;
    // The checked exception surfaces as the cause.
    assertEquals("my ioe", rte.getCause().getMessage());
  }
  if (!sawFailure) {
    fail("waitFor did not throw");
  }
  final long elapsedMs = Time.now() - started;
  // Failure propagation should be near-immediate, not timeout-bound.
  assertTrue("Test took " + elapsedMs + "ms", elapsedMs < 5000);
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A worker that fails a JUnit assertion must cause waitFor() to fail with
 * a RuntimeException whose cause carries the assertion message.
 */
@Test public void testThreadFails() throws Exception {
  TestContext ctx = new TestContext();
  ctx.addThread(new TestingThread(ctx) {
    @Override public void doWork() throws Exception {
      fail(FAIL_MSG);
    }
  });
  ctx.startThreads();
  final long started = Time.now();
  boolean sawFailure = false;
  try {
    ctx.waitFor(30000);
  } catch (RuntimeException rte) {
    sawFailure = true;
    // The assertion message is propagated via the cause.
    assertEquals(FAIL_MSG, rte.getCause().getMessage());
  }
  if (!sawFailure) {
    fail("waitFor did not throw");
  }
  final long elapsedMs = Time.now() - started;
  // Failure propagation should be near-immediate, not timeout-bound.
  assertTrue("Test took " + elapsedMs + "ms", elapsedMs < 5000);
}
BooleanVerifier
/**
 * A RepeatingTestThread must keep invoking its action until stopped: the
 * wait should last close to the requested window and accumulate many
 * iterations.
 */
@Test public void testRepeatingThread() throws Exception {
  final AtomicInteger counter = new AtomicInteger();
  TestContext ctx = new TestContext();
  ctx.addThread(new RepeatingTestThread(ctx) {
    @Override public void doAnAction() throws Exception {
      counter.incrementAndGet();
    }
  });
  ctx.startThreads();
  final long started = Time.now();
  ctx.waitFor(3000);
  ctx.stop();
  final long elapsed = Time.now() - started;
  // waitFor should return close to the requested 3s window.
  assertTrue("Test took " + elapsed + "ms", Math.abs(elapsed - 3000) < 500);
  // A tight repeat loop easily exceeds 1000 iterations in 3 seconds.
  assertTrue("Counter value = " + counter.get(), counter.get() > 1000);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a deliberate deadlock and verifies TimedOutTestsListener: the
 * deadlock report must contain the expected number of BLOCKED entries, and
 * a timed-out-test failure must yield a report with a thread dump and a
 * deadlock section.
 */
@Test(timeout=500) public void testThreadDumpAndDeadlocks() throws Exception {
new Deadlock();
String s=null;
// Poll until the JVM's deadlock detector notices the Deadlock threads.
while (true) {
s=TimedOutTestsListener.buildDeadlockInfo();
if (s != null) break;
Thread.sleep(100);
}
// The deadlock info is expected to mention BLOCKED exactly 3 times.
Assert.assertEquals(3,countStringOccurrences(s,"BLOCKED"));
// Simulate a JUnit timeout failure and feed it to the listener.
Failure failure=new Failure(null,new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
StringWriter writer=new StringWriter();
new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
String out=writer.toString();
// The generated report must include both sections.
Assert.assertTrue(out.contains("THREAD DUMP"));
Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
System.out.println(out);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that SimpleCopyListing honours an overridden shouldCopy():
 * _SUCCESS marker files are filtered out of the generated listing while all
 * other paths (including the directory entry) are retained, in order.
 */
@Test(timeout=10000) public void testSkipCopy() throws Exception {
  // Listing that skips job _SUCCESS marker files.
  SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS) {
    @Override protected boolean shouldCopy(Path path, DistCpOptions options) {
      return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME);
    }
  };
  FileSystem fs = FileSystem.get(getConf());
  List<Path> srcPaths = new ArrayList<Path>();
  srcPaths.add(new Path("/tmp/in4/1"));
  srcPaths.add(new Path("/tmp/in4/2"));
  Path target = new Path("/tmp/out4/1");
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/_SUCCESS");
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/file");
  TestDistCpUtils.createFile(fs, "/tmp/in4/2");
  fs.mkdirs(target);
  DistCpOptions options = new DistCpOptions(srcPaths, target);
  Path listingFile = new Path("/tmp/list4");
  listing.buildListing(listingFile, options);
  // Directory /1, file /1/file and file /2 — the _SUCCESS file is skipped.
  // JUnit convention: expected value first (original had the args swapped).
  Assert.assertEquals(3, listing.getNumberOfPaths());
  SequenceFile.Reader reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listingFile));
  try {
    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
    Text relativePath = new Text();
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/1", relativePath.toString());
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/1/file", relativePath.toString());
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/2", relativePath.toString());
    // No further entries: the _SUCCESS file must not appear.
    Assert.assertFalse(reader.next(relativePath, fileStatus));
  } finally {
    // The original leaked the reader on assertion failure.
    reader.close();
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Builds a copy listing for a single source file and verifies that the
 * listed relative path is the empty string (the file maps directly onto
 * the target). A decoy file of the same name under the target tree must
 * not confuse the listing.
 */
@Test(timeout=10000) public void testBuildListingForSingleFile(){
  FileSystem fs=null;
  String testRootString="/singleFileListing";
  Path testRoot=new Path(testRootString);
  SequenceFile.Reader reader=null;
  try {
    fs=FileSystem.get(getConf());
    if (fs.exists(testRoot)) TestDistCpUtils.delete(fs,testRootString);
    Path sourceFile=new Path(testRoot,"/source/foo/bar/source.txt");
    Path decoyFile=new Path(testRoot,"/target/moo/source.txt");
    Path targetFile=new Path(testRoot,"/target/moo/target.txt");
    TestDistCpUtils.createFile(fs,sourceFile.toString());
    TestDistCpUtils.createFile(fs,decoyFile.toString());
    TestDistCpUtils.createFile(fs,targetFile.toString());
    List<Path> srcPaths=new ArrayList<Path>();
    srcPaths.add(sourceFile);
    DistCpOptions options=new DistCpOptions(srcPaths,targetFile);
    CopyListing listing=new SimpleCopyListing(getConf(),CREDENTIALS);
    final Path listFile=new Path(testRoot,"/tmp/fileList.seq");
    listing.buildListing(listFile,options);
    reader=new SequenceFile.Reader(getConf(),SequenceFile.Reader.file(listFile));
    CopyListingFileStatus fileStatus=new CopyListingFileStatus();
    Text relativePath=new Text();
    Assert.assertTrue(reader.next(relativePath,fileStatus));
    // Single-file copy: the relative path collapses to the empty string.
    Assert.assertTrue(relativePath.toString().equals(""));
  }
  catch ( Exception e) {
    // Log BEFORE failing: Assert.fail throws, so the original's LOG.error
    // call placed after it was unreachable and the exception was never
    // recorded.
    LOG.error("Unexpected exception: ",e);
    Assert.fail("Unexpected exception encountered.");
  }
  finally {
    TestDistCpUtils.delete(fs,testRootString);
    IOUtils.closeStream(reader);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests DistCp.main(). main() is expected to call System.exit(), which the
 * test environment intercepts as an ExitException. Verifies a zero exit
 * status, that the target still exists, and that the staging directory was
 * cleaned up afterwards.
 */
@Test public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf=getConf();
  Path stagingDir=JobSubmissionFiles.getStagingDir(new Cluster(conf),conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source=createFile("tmp.txt");
  Path target=createFile("target.txt");
  try {
    String[] arg={target.toString(),source.toString()};
    DistCp.main(arg);
    // main() must exit via System.exit -> ExitException; reaching here is
    // a failure.
    Assert.fail();
  }
  catch ( ExitException t) {
    Assert.assertTrue(fs.exists(target));
    // JUnit convention: expected value first (original had args swapped).
    Assert.assertEquals(0,t.status);
    // Staging directory must be empty after DistCp's cleanup.
    Assert.assertEquals(0,stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests the run/execute path of the DistCp class: a simple file copy.
 * @throws Exception on copy failure
 */
@Test public void testCleanup() throws Exception {
  Configuration conf=getConf();
  Path stagingDir=JobSubmissionFiles.getStagingDir(new Cluster(conf),conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source=createFile("tmp.txt");
  Path target=createFile("target.txt");
  DistCp distcp=new DistCp(conf,null);
  String[] arg={source.toString(),target.toString()};
  // The original discarded run()'s exit code; a failed copy would have
  // gone unnoticed. 0 is DistCp's success code.
  Assert.assertEquals(0,distcp.run(arg));
  Assert.assertTrue(fs.exists(target));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Copies a file out of a HAR archive to the local file system via
 * HarFileSystem.copyToLocalFile and verifies the copied file's length.
 */
@Test public void testCopyToLocal() throws Exception {
final String fullHarPathStr=makeArchive();
// Scratch directory on the local FS; recreated fresh for this test.
final String tmpDir=System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
final Path tmpPath=new Path(tmpDir);
final LocalFileSystem localFs=FileSystem.getLocal(new Configuration());
localFs.delete(tmpPath,true);
localFs.mkdirs(tmpPath);
assertTrue(localFs.exists(tmpPath));
final HarFileSystem harFileSystem=new HarFileSystem(fs);
try {
final URI harUri=new URI(fullHarPathStr);
harFileSystem.initialize(harUri,fs.getConf());
final Path sourcePath=new Path(fullHarPathStr + Path.SEPARATOR + "a");
final Path targetPath=new Path(tmpPath,"straus");
// false: do not delete the source from the archive after copying.
harFileSystem.copyToLocalFile(false,sourcePath,targetPath);
FileStatus straus=localFs.getFileStatus(targetPath);
// The archived file "a" is expected to be exactly one byte long.
assertEquals(1,straus.getLen());
}
finally {
// Always release the HAR file system and remove the scratch directory.
harFileSystem.close();
localFs.delete(tmpPath,true);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies JMX metrics access for the DataNode service: after writing a
 * file, the BytesWritten metric must match the file size, and after
 * cluster shutdown no DataNode MBeans may remain registered.
 * @throws Exception on cluster or JMX failure
 */
@Test public void testDataNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Write fileSize bytes so BytesWritten has a known expected value.
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="DataNode";
jmx.setService(serviceName);
jmx.init();
assertEquals(fileSize,Integer.parseInt(jmx.getValue("BytesWritten")));
cluster.shutdown();
// After shutdown, querying the platform MBean server must find no
// lingering DataNode beans (verifies clean unregistration).
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies JMX metrics access for the NameNode service: live-datanode and
 * connection counts, corrupt-block gauge consistency, and clean MBean
 * unregistration after cluster shutdown.
 * @throws Exception on cluster or JMX failure
 */
@Test public void testNameNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test1"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="NameNode";
jmx.setService(serviceName);
jmx.init();
// printAllValues must succeed and produce output (checked by helper).
assertTrue("error printAllValues",checkPrintAllValues(jmx));
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumLiveDataNodes")));
// The JMX-reported corrupt-block count must agree with the FSNamesystem
// metrics gauge.
assertGauge("CorruptBlocks",Long.parseLong(jmx.getValue("CorruptBlocks")),getMetrics("FSNamesystem"));
// NOTE(review): asserts one open connection per datanode — presumably the
// datanode heartbeat connections; verify if this becomes flaky.
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumOpenConnections")));
cluster.shutdown();
// After shutdown, no NameNode MBeans may remain registered.
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * -atomic enables atomic commit; it is off by default and rejected when
 * combined with -update.
 */
@Test public void testParseAtomicCommit(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  // Default: atomic commit disabled.
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldAtomicCommit());
  // Explicit -atomic enables it.
  parsed=OptionsParser.parse(new String[]{"-atomic",src,dst});
  Assert.assertTrue(parsed.shouldAtomicCommit());
  // -atomic and -update are mutually exclusive.
  try {
    OptionsParser.parse(new String[]{"-atomic","-update",src,dst});
    Assert.fail("Atomic and sync folders were allowed");
  }
  catch ( IllegalArgumentException ignore) {
    // expected: the parser rejects the combination
  }
}
BooleanVerifier
/**
 * Blocking execution is the default; -async switches DistCp to
 * non-blocking mode. (The "Blokcing" typo in the method name is kept to
 * preserve the public test identifier.)
 */
@Test public void testParseBlokcing(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertTrue(parsed.shouldBlock());
  parsed=OptionsParser.parse(new String[]{"-async",src,dst});
  Assert.assertFalse(parsed.shouldBlock());
}
InternalCallVerifier BooleanVerifier
/**
 * CRC checking is on by default; -skipcrccheck (with -update) disables it
 * and also implies folder synchronisation.
 */
@Test public void testParseSkipCRC(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldSkipCRC());
  parsed=OptionsParser.parse(new String[]{"-update","-skipcrccheck",src,dst});
  Assert.assertTrue(parsed.shouldSyncFolder());
  Assert.assertTrue(parsed.shouldSkipCRC());
}
BooleanVerifier
/**
 * Folder synchronisation is off by default and enabled by -update.
 */
@Test public void testParseSyncFolders(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldSyncFolder());
  parsed=OptionsParser.parse(new String[]{"-update",src,dst});
  Assert.assertTrue(parsed.shouldSyncFolder());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * -overwrite enables overwriting; off by default and rejected together
 * with -update.
 */
@Test public void testParseOverwrite(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldOverwrite());
  parsed=OptionsParser.parse(new String[]{"-overwrite",src,dst});
  Assert.assertTrue(parsed.shouldOverwrite());
  // -update and -overwrite are mutually exclusive.
  try {
    OptionsParser.parse(new String[]{"-update","-overwrite",src,dst});
    Assert.fail("Update and overwrite aren't allowed together");
  }
  catch ( IllegalArgumentException ignore) {
    // expected: the parser rejects the combination
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * -delete requires either -update or -overwrite, and is rejected with
 * -atomic.
 */
@Test public void testParseDeleteMissing(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldDeleteMissing());
  // -update -delete: both flags set.
  parsed=OptionsParser.parse(new String[]{"-update","-delete",src,dst});
  Assert.assertTrue(parsed.shouldSyncFolder());
  Assert.assertTrue(parsed.shouldDeleteMissing());
  // -overwrite -delete: both flags set.
  parsed=OptionsParser.parse(new String[]{"-overwrite","-delete",src,dst});
  Assert.assertTrue(parsed.shouldOverwrite());
  Assert.assertTrue(parsed.shouldDeleteMissing());
  // -atomic -delete is rejected.
  try {
    OptionsParser.parse(new String[]{"-atomic","-delete",src,dst});
    Assert.fail("Atomic and delete folders were allowed");
  }
  catch ( IllegalArgumentException ignore) {
    // expected: the parser rejects the combination
  }
}
BooleanVerifier
/**
 * Failure-ignoring is off by default and enabled by -i.
 */
@Test public void testParseIgnoreFailure(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  DistCpOptions parsed=OptionsParser.parse(new String[]{src,dst});
  Assert.assertFalse(parsed.shouldIgnoreFailures());
  parsed=OptionsParser.parse(new String[]{"-i",src,dst});
  Assert.assertTrue(parsed.shouldIgnoreFailures());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the -p (preserve) flag family: no -p preserves nothing; bare
 * -p preserves the six core attributes; -p with explicit letters preserves
 * exactly those; invalid letters are rejected; and preserve() is
 * idempotent.
 */
@Test public void testPreserve(){
  // No -p: nothing preserved.
  DistCpOptions options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  // Bare -p (with -f): the six core attributes, but not ACL/XATTR.
  options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
  // Bare -p without -f behaves identically.
  options=OptionsParser.parse(new String[]{"-p","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
  // -pbr: only blocksize and replication.
  options=OptionsParser.parse(new String[]{"-pbr","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
  // -pbrgup: the five listed letters, no checksum/ACL/XATTR.
  options=OptionsParser.parse(new String[]{"-pbrgup","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
  // -pbrgupcax: everything, including ACL and XATTR.
  options=OptionsParser.parse(new String[]{"-pbrgupcax","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR));
  // -pc: checksum type only.
  options=OptionsParser.parse(new String[]{"-pc","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
  // Bare -p must yield exactly six preserved attributes.
  options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  int i=0;
  Iterator attribIterator=options.preserveAttributes();
  while (attribIterator.hasNext()) {
    attribIterator.next();
    i++;
  }
  // JUnit convention: expected value first (original had the args swapped).
  Assert.assertEquals(6,i);
  // Unknown preserve letters are rejected.
  try {
    OptionsParser.parse(new String[]{"-pabcd","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target"});
    Assert.fail("Invalid preserve attribute");
  }
  catch ( IllegalArgumentException ignore) {
  }
  catch ( NoSuchElementException ignore) {
  }
  // preserve() is idempotent: calling it twice leaves the flag set.
  options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  options.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  options.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies DistCpOptions.appendToConf(): parsed command-line switches must
 * materialise as configuration entries with the expected values.
 */
@Test public void testOptionsAppendToConf(){
  Configuration conf=new Configuration();
  // Fresh configuration: no DistCp flags present yet.
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(),false));
  DistCpOptions options=OptionsParser.parse(new String[]{"-atomic","-i","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(),false));
  // JUnit convention: expected value first (original had the args swapped).
  Assert.assertEquals(DistCpConstants.DEFAULT_BANDWIDTH_MB,conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(),-1));
  conf=new Configuration();
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(),false));
  // assertNull states the intent more directly than assertEquals(x, null).
  Assert.assertNull(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
  options=OptionsParser.parse(new String[]{"-update","-delete","-pu","-bandwidth","11","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(),false));
  // -pu preserves only USER, encoded as "U".
  Assert.assertEquals("U",conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
  Assert.assertEquals(11,conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(),-1));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * DistCpOptionSwitch.addToConf must set the switch's boolean flag in a
 * configuration where it was previously unset.
 */
@Test public void testOptionsSwitchAddToConf(){
  final Configuration conf=new Configuration();
  final String label=DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel();
  // Fresh configuration: the switch must not be present.
  Assert.assertNull(conf.get(label));
  DistCpOptionSwitch.addToConf(conf,DistCpOptionSwitch.ATOMIC_COMMIT);
  Assert.assertTrue(conf.getBoolean(label,false));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * -append requires -update, is incompatible with -skipcrccheck, and when
 * valid sets both the append and sync-folders configuration flags.
 */
@Test public void testAppendOption(){
  final String src="hdfs://localhost:8020/source/first";
  final String dst="hdfs://localhost:8020/target/";
  Configuration conf=new Configuration();
  // Neither flag is set on a fresh configuration.
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(),false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false));
  DistCpOptions options=OptionsParser.parse(new String[]{"-update","-append",src,dst});
  options.appendToConf(conf);
  // -update -append sets both flags.
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(),false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false));
  // -append without -update is rejected.
  try {
    options=OptionsParser.parse(new String[]{"-append",src,dst});
    fail("Append should fail if update option is not specified");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Append is valid only with update options",e);
  }
  // -append combined with -skipcrccheck is rejected.
  try {
    options=OptionsParser.parse(new String[]{"-append","-update","-skipcrccheck",src,dst});
    fail("Append should fail if skipCrc option is specified");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Append is disallowed when skipping CRC",e);
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Each admin sub-command, invoked without its required arguments, must
 * return -1 (usage error); "-help -some" must still succeed; and the
 * usage banner must be printed to stderr for unknown commands.
 */
@Test public void testDFSAdminInvalidUsageHelp(){
  ImmutableSet args=ImmutableSet.of("-report","-saveNamespace","-rollEdits","-restoreFailedStorage","-refreshNodes","-finalizeUpgrade","-metasave","-refreshUserToGroupsMappings","-printTopology","-refreshNamenodes","-deleteBlockPool","-setBalancerBandwidth","-fetchImage");
  try {
    // Include the sub-command in the message: the original's bare
    // assertTrue gave no indication of which command failed.
    for ( String arg : args) assertTrue("expected -1 exit for " + arg,ToolRunner.run(new DFSAdmin(),fillArgs(arg)) == -1);
    assertTrue("-help should exit 0",ToolRunner.run(new DFSAdmin(),new String[]{"-help","-some"}) == 0);
  }
  catch ( Exception e) {
    fail("testDFSAdminHelp error: " + e);
  }
  String pattern="Usage: java DFSAdmin";
  checkOutput(new String[]{"-cancel","-renew"},pattern,System.err,DFSAdmin.class);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit must refuse to move the work directory onto a final path
 * that already exists; both directories must survive the failed commit.
 */
@Test public void testAtomicCommitExistingFinal(){
TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config);
JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf=jobContext.getConfiguration();
// Random sibling paths under /tmp1 for the work and (pre-existing) final dirs.
String workPath="/tmp1/" + String.valueOf(rand.nextLong());
String finalPath="/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs=null;
try {
OutputCommitter committer=new CopyCommitter(null,taskAttemptContext);
fs=FileSystem.get(conf);
fs.mkdirs(new Path(workPath));
fs.mkdirs(new Path(finalPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
try {
committer.commitJob(jobContext);
Assert.fail("Should not be able to atomic-commit to pre-existing path.");
}
catch ( Exception exception) {
// Expected: commit rejected; neither directory may have been disturbed.
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
LOG.info("Atomic-commit Test pass.");
}
}
catch ( IOException e) {
LOG.error("Exception encountered while testing for atomic commit.",e);
Assert.fail("Atomic commit failure");
}
finally {
// Clean up test paths and reset the atomic-copy flag for later tests.
TestDistCpUtils.delete(fs,workPath);
TestDistCpUtils.delete(fs,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,false);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit with a non-existent final path must move the work
 * directory into place; a second commit must be a no-op (idempotent).
 */
@Test public void testAtomicCommitMissingFinal(){
TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config);
JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf=jobContext.getConfiguration();
// Random sibling paths under /tmp1; only the work dir is created.
String workPath="/tmp1/" + String.valueOf(rand.nextLong());
String finalPath="/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs=null;
try {
OutputCommitter committer=new CopyCommitter(null,taskAttemptContext);
fs=FileSystem.get(conf);
fs.mkdirs(new Path(workPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertFalse(fs.exists(new Path(finalPath)));
committer.commitJob(jobContext);
// Commit renames work -> final.
Assert.assertFalse(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
// A repeated commit must leave the state unchanged.
committer.commitJob(jobContext);
Assert.assertFalse(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
}
catch ( IOException e) {
LOG.error("Exception encountered while testing for preserve status",e);
Assert.fail("Atomic commit failure");
}
finally {
// Clean up test paths and reset the atomic-copy flag for later tests.
TestDistCpUtils.delete(fs,workPath);
TestDistCpUtils.delete(fs,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,false);
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Copying without preserving block size, when source block sizes differ
 * from the target default, must fail — and the failure message must point
 * the user at the -pb and -skipCrc remedies.
 */
@Test(timeout=40000) public void testCopyFailOnBlockSizeDifference(){
try {
deleteState();
// Source files are created with non-default block sizes.
createSourceDataWithDifferentBlockSize();
FileSystem fs=cluster.getFileSystem();
CopyMapper copyMapper=new CopyMapper();
StubContext stubContext=new StubContext(getConfiguration(),null,0);
Mapper.Context context=stubContext.getContext();
Configuration configuration=context.getConfiguration();
// Preserve NO attributes, so block size is not preserved on copy.
EnumSet fileAttributes=EnumSet.noneOf(DistCpOptions.FileAttribute.class);
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),DistCpUtils.packAttributes(fileAttributes));
copyMapper.setup(context);
for ( Path path : pathList) {
final FileStatus fileStatus=fs.getFileStatus(path);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),path)),new CopyListingFileStatus(fileStatus),context);
}
// Reaching this point means no copy failed — the test must fail.
Assert.fail("Copy should have failed because of block-size difference.");
}
catch ( Exception exception) {
// The nested cause's message must suggest both workarounds.
Assert.assertTrue("Failure exception should have suggested the use of -pb.",exception.getCause().getCause().getMessage().contains("pb"));
Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",exception.getCause().getCause().getMessage().contains("skipCrc"));
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Copying a source DIRECTORY over an existing target FILE must fail with
 * a "Can't replace" IOException.
 */
@Test(timeout=40000) public void testDirToFile(){
  try {
    deleteState();
    createSourceData();
    FileSystem fs=cluster.getFileSystem();
    CopyMapper copyMapper=new CopyMapper();
    StubContext stubContext=new StubContext(getConfiguration(),null,0);
    Mapper.Context context=stubContext.getContext();
    // Source entry is a directory; the same relative path on the target
    // side is a plain file.
    mkdirs(SOURCE_PATH + "/src/file");
    touchFile(TARGET_PATH + "/src/file");
    try {
      copyMapper.setup(context);
      copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(fs.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context);
      // The original test passed silently if map() did not throw; make
      // the expectation explicit. (AssertionError is not an IOException,
      // so the catch below does not swallow this failure.)
      Assert.fail("Expected map() to fail: a directory cannot replace a file");
    }
    catch ( IOException e) {
      Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
    }
  }
  catch ( Exception e) {
    LOG.error("Exception encountered ",e);
    Assert.fail("Test failed: " + e.getMessage());
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When source and target files are identical and the unprivileged user
 * cannot update attributes, the copy must be SKIPped rather than failed:
 * the mapper's output must record a SKIP entry for the source path.
 */
@Test(timeout=40000) public void testSkipCopyNoPerms(){
try {
deleteState();
createSourceData();
// Unprivileged user with no special HDFS permissions.
UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper=new CopyMapper();
// Build the mapper context as the unprivileged user.
final StubContext stubContext=tmpUser.doAs(new PrivilegedAction(){
@Override public StubContext run(){
try {
return new StubContext(getConfiguration(),null,0);
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
throw new RuntimeException(e);
}
}
}
);
final Mapper.Context context=stubContext.getContext();
// Preserve everything except ACL/XATTR, forcing attribute updates that
// the guest user is not permitted to perform.
EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus));
// Identical empty files on both sides, read-only for everyone.
touchFile(SOURCE_PATH + "/src/file");
touchFile(TARGET_PATH + "/src/file");
cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ));
cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ));
// File system handle owned by the unprivileged user.
final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){
@Override public FileSystem run(){
try {
return FileSystem.get(configuration);
}
catch ( IOException e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
}
);
// Run the copy as guest: identical files + no perms => SKIP record.
tmpUser.doAs(new PrivilegedAction(){
@Override public Integer run(){
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context);
Assert.assertEquals(stubContext.getWriter().values().size(),1);
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP"));
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().contains(SOURCE_PATH + "/src/file"));
}
catch ( Exception e) {
throw new RuntimeException(e);
}
return null;
}
}
);
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * When attribute preservation is requested but the unprivileged user lacks
 * the rights to set attributes on the target, the copy must fail with an
 * AccessControlException rather than silently succeed.
 */
@Test(timeout=40000) public void testPreserve(){
try {
deleteState();
createSourceData();
// Unprivileged user with no special HDFS permissions.
UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper=new CopyMapper();
// NOTE(review): the generic type on this PrivilegedAction looks mangled
// by extraction (likely PrivilegedAction<Mapper<...>.Context>) — verify
// against the original source.
final Mapper.Context context=tmpUser.doAs(new PrivilegedAction.Context>(){
@Override public Mapper.Context run(){
try {
StubContext stubContext=new StubContext(getConfiguration(),null,0);
return stubContext.getContext();
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
throw new RuntimeException(e);
}
}
}
);
// Preserve everything except ACL/XATTR, forcing attribute updates the
// guest user cannot perform on the target.
EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus));
touchFile(SOURCE_PATH + "/src/file");
mkdirs(TARGET_PATH);
// 511 == 0777: target dir is world-writable so the copy itself could
// proceed; only the attribute preservation should be denied.
cluster.getFileSystem().setPermission(new Path(TARGET_PATH),new FsPermission((short)511));
// File system handle owned by the unprivileged user.
final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){
@Override public FileSystem run(){
try {
return FileSystem.get(configuration);
}
catch ( IOException e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
}
);
// Run the copy as guest and expect an AccessControlException.
tmpUser.doAs(new PrivilegedAction(){
@Override public Integer run(){
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context);
Assert.fail("Expected copy to fail");
}
catch ( AccessControlException e) {
// Expected path; assertTrue(...,true) is a no-op kept for the message.
Assert.assertTrue("Got exception: " + e.getMessage(),true);
}
catch ( Exception e) {
throw new RuntimeException(e);
}
return null;
}
}
);
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped.
 */
@Test(timeout=40000) public void testSingleFileCopy(){
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(sourceFilePath.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();
    // Phase 1: the final path names the PARENT directory, so the
    // pre-existing same-named target file causes a skip.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.getParent().toString());
    copyMapper.setup(context);
    final CopyListingFileStatus sourceFileStatus =
        new CopyListingFileStatus(fs.getFileStatus(sourceFilePath));
    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)),
        sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been skipped", before == after);
    // Phase 2: the final path names the FILE itself (single-file copy),
    // so the existing target must be overwritten.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.toString());
    copyMapper.setup(context);
    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try {
      // Let the clock advance so the overwrite is observable.
      Thread.sleep(2);
    }
    catch (Throwable ignore) {
    }
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)),
        sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been overwritten.", before < after);
  }
  catch (Exception exception) {
    // FIX: printStackTrace() used to come after Assert.fail(), which
    // throws, making the trace unreachable. Log the trace first.
    exception.printStackTrace();
    Assert.fail("Unexpected exception: " + exception.getMessage());
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=40000) public void testFileToDir(){
  try {
    deleteState();
    createSourceData();
    final FileSystem fileSystem = cluster.getFileSystem();
    final CopyMapper mapper = new CopyMapper();
    final StubContext stub = new StubContext(getConfiguration(), null, 0);
    final Mapper.Context mapContext = stub.getContext();
    // Source is a regular file while the target of the same name is a
    // directory: the mapper must refuse to replace the directory.
    touchFile(SOURCE_PATH + "/src/file");
    mkdirs(TARGET_PATH + "/src/file");
    try {
      mapper.setup(mapContext);
      CopyListingFileStatus sourceStatus = new CopyListingFileStatus(
          fileSystem.getFileStatus(new Path(SOURCE_PATH + "/src/file")));
      mapper.map(new Text("/src/file"), sourceStatus, mapContext);
    }
    catch (IOException e) {
      Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
    }
  }
  catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Test failed: " + e.getMessage());
  }
}
BooleanVerifier
@Test(timeout=40000) public void testMakeDirFailure(){
  try {
    deleteState();
    createSourceData();
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();
    Configuration configuration = context.getConfiguration();
    // A work path full of glob metacharacters on an unreachable webhdfs
    // endpoint: directory creation cannot succeed.
    String workPath = new Path("webhdfs://localhost:1234/*/*/*/?/")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
    copyMapper.setup(context);
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
        new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);
    // FIX: was Assert.assertTrue(msg, false) — fail() states the intent
    // directly. (AssertionError is not an Exception, so it escapes the
    // catch below, exactly as before.)
    Assert.fail("There should have been an exception.");
  }
  catch (Exception ignore) {
    // expected: the bogus work path makes setup/map throw
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testGetOutputCommitter(){
  try {
    // Build a minimal task-attempt context pointing at a dummy output dir.
    TaskAttemptID attemptId = new TaskAttemptID("200707121733", 1, TaskType.MAP, 1, 1);
    TaskAttemptContext context = new TaskAttemptContextImpl(new Configuration(), attemptId);
    context.getConfiguration().set("mapred.output.dir", "/out");
    // CopyOutputFormat must hand back its dedicated committer type.
    Object committer = new CopyOutputFormat().getOutputCommitter(context);
    Assert.assertTrue(committer instanceof CopyCommitter);
  }
  catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Unable to get output committer");
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Builds a copy listing, splits it via DynamicInputFormat, and verifies that
// the record readers collectively hand out every expected file exactly once
// with monotonically non-decreasing progress.
@Test public void testGetSplits() throws Exception {
DistCpOptions options=getOptions();
Configuration configuration=new Configuration();
configuration.set("mapred.map.tasks",String.valueOf(options.getMaxMaps()));
CopyListing.getCopyListing(configuration,CREDENTIALS,options).buildListing(new Path(cluster.getFileSystem().getUri().toString() + "/tmp/testDynInputFormat/fileList.seq"),options);
JobContext jobContext=new JobContextImpl(configuration,new JobID());
DynamicInputFormat inputFormat=new DynamicInputFormat();
List splits=inputFormat.getSplits(jobContext);
int nFiles=0;
int taskId=0;
for ( InputSplit split : splits) {
RecordReader recordReader=inputFormat.createRecordReader(split,null);
StubContext stubContext=new StubContext(jobContext.getConfiguration(),recordReader,taskId);
final TaskAttemptContext taskAttemptContext=stubContext.getContext();
// NOTE(review): initializes with splits.get(0) rather than the current
// `split`. Dynamic readers acquire work chunks at runtime, so the split
// argument may be ignored — confirm before "fixing" this.
recordReader.initialize(splits.get(0),taskAttemptContext);
float previousProgressValue=0f;
while (recordReader.nextKeyValue()) {
CopyListingFileStatus fileStatus=recordReader.getCurrentValue();
String source=fileStatus.getPath().toString();
System.out.println(source);
// Every record handed out must be one of the expected source paths.
Assert.assertTrue(expectedFilePaths.contains(source));
// Progress must never decrease and must stay within [0, 1].
final float progress=recordReader.getProgress();
Assert.assertTrue(progress >= previousProgressValue);
Assert.assertTrue(progress >= 0.0f);
Assert.assertTrue(progress <= 1.0f);
previousProgressValue=progress;
++nFiles;
}
// A fully-consumed reader reports complete progress.
Assert.assertTrue(recordReader.getProgress() == 1.0f);
++taskId;
}
// Across all readers, every expected file was seen exactly once.
Assert.assertEquals(expectedFilePaths.size(),nFiles);
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * There should be files in the directory named by
 * ${test.build.data}/rumen/histogram-test .
 * There will be pairs of files, inputXxx.json and goldXxx.json .
 * We read the input file as a HistogramRawTestData in json. Then we
 * create a Histogram using the data field, and then a
 * LoggedDiscreteCDF using the percentiles and scale field. Finally,
 * we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 * deepCompare them.
 *
 * @throws IOException if a test input or gold file cannot be read
 */
@Test public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir =
      new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
  FileStatus[] tests = lfs.listStatus(rootInputFile);
  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      // Each inputXxx.json is paired with a goldXxx.json reference file.
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold" + testName);
      // FIX: message typo — was "Gold file dies not exist".
      assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser parser =
          new JsonObjectMapperParser(goldStream, LoggedDiscreteCDF.class);
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        // Fails with the offending tree path if the computed CDF
        // diverges from the gold file anywhere.
        dcdf.deepCompare(newResult, new TreePath(null, ""));
      }
      catch (DeepInequalityException e) {
        fail(e.path.toString());
      }
      finally {
        parser.close();
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Draws one million samples from a piecewise-linear CDF generator built from
// a three-point ranking (10% -> 200k, 50% -> 800k, 90% -> 1000k) over the
// range [100000, 1100000], then checks that the empirical percentiles track
// the piecewise-linear interpolation within maximumRelativeError (RMS).
@Test public void testOneRun(){
LoggedDiscreteCDF input=new LoggedDiscreteCDF();
input.setMinimum(100000L);
input.setMaximum(1100000L);
ArrayList rankings=new ArrayList();
rankings.add(makeRR(0.1,200000L));
rankings.add(makeRR(0.5,800000L));
rankings.add(makeRR(0.9,1000000L));
input.setRankings(rankings);
input.setNumberValues(3);
CDFRandomGenerator gen=new CDFPiecewiseLinearRandomGenerator(input);
Histogram values=new Histogram();
// Sample heavily so the empirical CDF is statistically stable.
for (int i=0; i < 1000000; ++i) {
long value=gen.randomValue();
values.enter(value);
}
// Interior percentiles 1..99.
int[] percentiles=new int[99];
for (int i=0; i < 99; ++i) {
percentiles[i]=i + 1;
}
long[] result=values.getCDF(100,percentiles);
long sumErrorSquares=0L;
// Segment [min, 10%]: expected value grows 10000 per percentile from 100000.
for (int i=0; i < 10; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment (10%, 50%]: slope 15000 per percentile.
for (int i=10; i < 50; ++i) {
long error=result[i] - (15000L * i + 50000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (15000L * i + 50000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment (50%, 90%]: slope 5000 per percentile.
for (int i=50; i < 90; ++i) {
long error=result[i] - (5000L * i + 550000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (5000L * i + 550000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment (90%, max]: slope 10000 per percentile (result has 101 entries:
// minimum, the 99 percentiles, and maximum).
for (int i=90; i <= 100; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Normalize by bucket count and the median datum to get a relative error.
double realSumErrorSquares=(double)sumErrorSquares;
double normalizedError=realSumErrorSquares / 100 / rankings.get(1).getDatum()/ rankings.get(1).getDatum();
double RMSNormalizedError=Math.sqrt(normalizedError);
System.out.println("sumErrorSquares = " + sumErrorSquares);
System.out.println("normalizedError: " + normalizedError + ", RMSNormalizedError: "+ RMSNormalizedError);
System.out.println("Cumulative error is " + RMSNormalizedError);
assertTrue("The RMS relative error per bucket, " + RMSNormalizedError + ", exceeds our tolerance of "+ maximumRelativeError,RMSNormalizedError <= maximumRelativeError);
}
BooleanVerifier
@Test public void testSeedGeneration(){
  // Two distinct master seeds drive the per-stream seed derivation.
  final long seedA = 42;
  final long seedB = 43;
  // Same stream name + same master seed must reproduce the same seed.
  assertTrue("Deterministic seeding", getSeed("stream1", seedA) == getSeed("stream1", seedA));
  assertTrue("Deterministic seeding", getSeed("stream2", seedB) == getSeed("stream2", seedB));
  // Changing either the stream name or the master seed must diverge.
  assertTrue("Different streams", getSeed("stream1", seedA) != getSeed("stream2", seedA));
  assertTrue("Different master seeds", getSeed("stream1", seedA) != getSeed("stream1", seedB));
}
BooleanVerifier
@Test public void testRetriableCommand(){
  // FIX: replaced the assertTrue(true)/assertTrue(false) anti-pattern with
  // Assert.fail(...) carrying a message, so a failure explains itself.
  // Five injected failures exceed the default retry budget: must throw.
  try {
    new MyRetriableCommand(5).execute(0);
    Assert.fail("Expected the command to fail after exhausting default retries");
  }
  catch (Exception e) {
    // expected
  }
  // Three injected failures fit inside the default retry budget: must succeed.
  try {
    new MyRetriableCommand(3).execute(0);
  }
  catch (Exception e) {
    Assert.fail("Command should have succeeded under the default retry policy: " + e.getMessage());
  }
  // An explicit five-retry policy covers five injected failures: must succeed.
  try {
    new MyRetriableCommand(5, RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 0, TimeUnit.MILLISECONDS)).execute(0);
  }
  catch (Exception e) {
    Assert.fail("Command should have succeeded under the explicit retry policy: " + e.getMessage());
  }
}
BooleanVerifier
// Exercises the system-class matching rules: exact names, package prefixes
// (trailing '.'), comma-separated lists, leading '/' normalization, and
// '-'-prefixed exclusions that override later matches.
@Test public void testIsSystemClass(){
// A null list matches nothing.
assertFalse(isSystemClass("org.example.Foo",null));
// Exact class-name match.
assertTrue(isSystemClass("org.example.Foo",classes("org.example.Foo")));
// A leading '/' on the candidate name is ignored.
assertTrue(isSystemClass("/org.example.Foo",classes("org.example.Foo")));
// A trailing '.' entry matches the whole package.
assertTrue(isSystemClass("org.example.Foo",classes("org.example.")));
// Multiple comma-separated entries are each consulted.
assertTrue(isSystemClass("net.example.Foo",classes("org.example.,net.example.")));
// A '-' entry excludes a class even though a later entry would match it.
assertFalse(isSystemClass("org.example.Foo",classes("-org.example.Foo,org.example.")));
// The exclusion covers only its own prefix, not package siblings.
assertTrue(isSystemClass("org.example.Bar",classes("-org.example.Foo.,org.example.")));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testConstructUrlsFromClasspath() throws Exception {
  // Fixture: a plain file, a directory, and a jar directory holding one
  // jar and one non-jar file.
  File file = new File(testDir, "file");
  assertTrue("Create file", file.createNewFile());
  File dir = new File(testDir, "dir");
  assertTrue("Make dir", dir.mkdir());
  File jarsDir = new File(testDir, "jarsdir");
  assertTrue("Make jarsDir", jarsDir.mkdir());
  File nonJarFile = new File(jarsDir, "nonjar");
  assertTrue("Create non-jar file", nonJarFile.createNewFile());
  File jarFile = new File(jarsDir, "a.jar");
  assertTrue("Create jar file", jarFile.createNewFile());
  File nofile = new File(testDir, "nofile");
  // Classpath mixes real entries with nonexistent ones; every entry is
  // terminated by the platform path separator.
  String[] entries = {
      file.getAbsolutePath(),
      dir.getAbsolutePath(),
      jarsDir.getAbsolutePath() + "/*",
      nofile.getAbsolutePath(),
      nofile.getAbsolutePath() + "/*"
  };
  StringBuilder cp = new StringBuilder();
  for (String entry : entries) {
    cp.append(entry).append(File.pathSeparator);
  }
  URL[] urls = constructUrlsFromClasspath(cp.toString());
  // Nonexistent entries are dropped, and the wildcard keeps only the jar.
  assertEquals(3, urls.length);
  assertEquals(file.toURI().toURL(), urls[0]);
  assertEquals(dir.toURI().toURL(), urls[1]);
  assertEquals(jarFile.toURI().toURL(), urls[2]);
}
BooleanVerifier NullVerifier HybridVerifier
// Verifies that ClassUtil can locate the jar a class was loaded from, using
// the log4j Logger class as a class known to live in a jar.
@Test(timeout=1000) public void testFindContainingJar(){
String containingJar=ClassUtil.findContainingJar(Logger.class);
Assert.assertNotNull("Containing jar not found for Logger",containingJar);
File jarFile=new File(containingJar);
Assert.assertTrue("Containing jar does not exist on file system",jarFile.exists());
// The jar file name is expected to look like "log4j<something>.jar".
Assert.assertTrue("Incorrect jar file" + containingJar,jarFile.getName().matches("log4j.+[.]jar"));
}
TestInitializer BooleanVerifier HybridVerifier
@Before public void setUp(){
  // Start each test from an empty scratch directory.
  assertTrue(FileUtil.fullyDelete(TEST_DIR));
  assertTrue(TEST_DIR.mkdirs());
  // Remember the real streams so tearDown() can restore them, then
  // redirect stdout/stderr into capture buffers the tests can inspect.
  oldStdout = System.out;
  oldStderr = System.err;
  stdout = new ByteArrayOutputStream();
  stderr = new ByteArrayOutputStream();
  printStdout = new PrintStream(stdout);
  printStderr = new PrintStream(stderr);
  System.setOut(printStdout);
  System.setErr(printStderr);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testUnrecognized(){
  // An unknown flag must make Classpath exit with an error message.
  try {
    Classpath.main(new String[]{"--notarealoption"});
    fail("expected exit");
  }
  catch (ExitUtil.ExitException e) {
    // FIX: assertEquals reports the stray byte count on failure, unlike
    // the old assertTrue(length == 0).
    assertEquals(0, stdout.toByteArray().length);
    String strErr = new String(stderr.toByteArray(), UTF8);
    assertTrue(strErr.contains("unrecognized option"));
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testJarFileMissing() throws IOException {
  // "--jar" without a path argument must make Classpath exit with an error.
  try {
    Classpath.main(new String[]{"--jar"});
    fail("expected exit");
  }
  catch (ExitUtil.ExitException e) {
    // FIX: assertEquals reports the stray byte count on failure, unlike
    // the old assertTrue(length == 0).
    assertEquals(0, stdout.toByteArray().length);
    String strErr = new String(stderr.toByteArray(), UTF8);
    assertTrue(strErr.contains("requires path of jar"));
  }
}
TestCleaner BooleanVerifier HybridVerifier
@After public void tearDown(){
// Restore the real stdout/stderr captured in setUp().
System.setOut(oldStdout);
System.setErr(oldStderr);
// Close the capture streams, logging (not throwing) on failure.
IOUtils.cleanup(LOG,printStdout,printStderr);
// Remove the scratch directory created in setUp().
assertTrue(FileUtil.fullyDelete(TEST_DIR));
}
APIUtilityVerifier BooleanVerifier
@Test public void testHelpShort(){
  // "-h" prints usage on stdout; stderr must stay silent.
  Classpath.main(new String[]{"-h"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertTrue(strOut.contains("Prints the classpath"));
  // FIX: assertEquals reports the stray byte count on failure, unlike
  // the old assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
BooleanVerifier
@Test public void testJar() throws IOException {
  // "--jar <path>" writes a manifest-only classpath jar, silently.
  File file = new File(TEST_DIR, "classpath.jar");
  Classpath.main(new String[]{"--jar", file.getAbsolutePath()});
  // FIX: assertEquals reports the stray byte count on failure, unlike
  // the old assertTrue(length == 0).
  assertEquals(0, stdout.toByteArray().length);
  assertEquals(0, stderr.toByteArray().length);
  assertTrue(file.exists());
  assertJar(file);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlob(){
  // "--glob" prints the JVM's own classpath verbatim on stdout.
  Classpath.main(new String[]{"--glob"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertEquals(System.getProperty("java.class.path"), strOut.trim());
  // FIX: assertEquals reports the stray byte count on failure, unlike
  // the old assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
APIUtilityVerifier BooleanVerifier
@Test public void testHelp(){
  // "--help" prints usage on stdout; stderr must stay silent.
  Classpath.main(new String[]{"--help"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertTrue(strOut.contains("Prints the classpath"));
  // FIX: assertEquals reports the stray byte count on failure, unlike
  // the old assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testEquality(){
  DataChecksum crc32Chunk512 = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
  // Same algorithm and bytes-per-checksum: equal.
  assertEquals(crc32Chunk512, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  // Different bytes-per-checksum: not equal.
  assertFalse(crc32Chunk512.equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 1024)));
  // Different checksum algorithm: not equal.
  assertFalse(crc32Chunk512.equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512)));
}
InternalCallVerifier BooleanVerifier
/**
 * Add a bunch of IPS to the file
 * Check for inclusion
 * Check for exclusion
 */
@Test public void testSubnetsAndIPs() throws IOException {
// One literal address plus a /23 subnet (10.221.102.0 - 10.221.103.255).
String[] ips={"10.119.103.112","10.221.102.0/23"};
createFileWithEntries("ips.txt",ips);
IPList ipList=new FileBasedIPList("ips.txt");
// Literal address: exact match only.
assertTrue("10.119.103.112 is not in the list",ipList.isIn("10.119.103.112"));
assertFalse("10.119.103.113 is in the list",ipList.isIn("10.119.103.113"));
// Addresses across both /24s covered by the /23 match...
assertTrue("10.221.102.0 is not in the list",ipList.isIn("10.221.102.0"));
assertTrue("10.221.102.1 is not in the list",ipList.isIn("10.221.102.1"));
assertTrue("10.221.103.1 is not in the list",ipList.isIn("10.221.103.1"));
assertTrue("10.221.103.255 is not in the list",ipList.isIn("10.221.103.255"));
// ...but the first addresses past the /23 boundary do not.
assertFalse("10.221.104.0 is in the list",ipList.isIn("10.221.104.0"));
assertFalse("10.221.104.1 is in the list",ipList.isIn("10.221.104.1"));
}
BooleanVerifier
/**
 * A null query address must never match any entry in the IP list.
 */
@Test public void testNullIP() throws IOException {
  String[] entries = {"10.119.103.112", "10.221.102.0/23"};
  createFileWithEntries("ips.txt", entries);
  IPList list = new FileBasedIPList("ips.txt");
  assertFalse("Null Ip is in the list", list.isIn(null));
}
InternalCallVerifier BooleanVerifier
/**
 * Add a bunch of subnets and IPSs to the file
 * Check for inclusion
 * Check for exclusion
 */
@Test public void testWithMultipleSubnetAndIPs() throws IOException {
// Two literal addresses plus a /23 and a /16 subnet.
String[] ips={"10.119.103.112","10.221.102.0/23","10.222.0.0/16","10.113.221.221"};
createFileWithEntries("ips.txt",ips);
IPList ipList=new FileBasedIPList("ips.txt");
// First literal address: exact match only.
assertTrue("10.119.103.112 is not in the list",ipList.isIn("10.119.103.112"));
assertFalse("10.119.103.113 is in the list",ipList.isIn("10.119.103.113"));
// Inside vs just past the /23 range.
assertTrue("10.221.103.121 is not in the list",ipList.isIn("10.221.103.121"));
assertFalse("10.221.104.0 is in the list",ipList.isIn("10.221.104.0"));
// Inside vs outside the /16 range.
assertTrue("10.222.103.121 is not in the list",ipList.isIn("10.222.103.121"));
assertFalse("10.223.104.0 is in the list",ipList.isIn("10.223.104.0"));
// Second literal address: exact match only.
assertTrue("10.113.221.221 is not in the list",ipList.isIn("10.113.221.221"));
assertFalse("10.113.221.222 is in the list",ipList.isIn("10.113.221.222"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testHostFileReaderWithCommentsOnly() throws Exception {
  // Each file contains nothing but a single comment line.
  FileWriter includesWriter = new FileWriter(includesFile);
  includesWriter.write("#Hosts-in-DFS\n");
  includesWriter.close();
  FileWriter excludesWriter = new FileWriter(excludesFile);
  excludesWriter.write("#DFS-Hosts-excluded\n");
  excludesWriter.close();
  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  // Comment-only files must yield empty include/exclude sets.
  assertEquals(0, reader.getHosts().size());
  assertEquals(0, reader.getExcludedHosts().size());
  assertFalse(reader.getHosts().contains("somehost5"));
  assertFalse(reader.getExcludedHosts().contains("somehost5"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Hosts may be separated by spaces, tabs, and newlines; '#' starts a comment
// that runs to the end of its line. Both files below therefore contain four
// hosts (somehost..somehost4) with somehost5 hidden behind a comment.
@Test public void testHostFileReaderWithTabs() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("   \n");
efw.write("   somehost \t   somehost2 \n somehost4");
efw.write("   somehost3 \t # somehost5");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("   \n");
ifw.write("   somehost \t   somehost2 \n somehost4");
ifw.write("   somehost3 \t # somehost5");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// Four hosts per file; the commented-out somehost5 must not appear.
assertEquals(4,includesLen);
assertEquals(4,excludesLen);
assertTrue(hfp.getHosts().contains("somehost2"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertTrue(hfp.getExcludedHosts().contains("somehost2"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Space-separated hosts with a trailing '#' comment: each file yields three
// hosts (somehost..somehost3); somehost4 is commented out.
@Test public void testHostFileReaderWithSpaces() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("   somehost somehost2");
efw.write("   somehost3 # somehost4");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("   somehost somehost2");
ifw.write("   somehost3 # somehost4");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// Three hosts per file; the commented-out somehost4 must not appear.
assertEquals(3,includesLen);
assertEquals(3,excludesLen);
assertTrue(hfp.getHosts().contains("somehost3"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getHosts().contains("somehost4"));
assertTrue(hfp.getExcludedHosts().contains("somehost3"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost4"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testHostFileReaderWithNull() throws Exception {
  // Create both files empty. FIX: try-with-resources guarantees the first
  // writer is closed even if opening/closing the second one throws (the
  // original open/close pair leaked on that path).
  try (FileWriter efw = new FileWriter(excludesFile);
       FileWriter ifw = new FileWriter(includesFile)) {
    // intentionally write nothing: both files stay empty
  }
  HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
  int includesLen = hfp.getHosts().size();
  int excludesLen = hfp.getExcludedHosts().size();
  // Empty files must yield empty include/exclude sets.
  assertEquals(0, includesLen);
  assertEquals(0, excludesLen);
  assertFalse(hfp.getHosts().contains("somehost5"));
  assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testRefreshHostFileReaderWithNonexistentFile() throws Exception {
  // Create both files empty. FIX: try-with-resources guarantees the first
  // writer is closed even if opening/closing the second one throws.
  try (FileWriter efw = new FileWriter(excludesFile);
       FileWriter ifw = new FileWriter(includesFile)) {
    // intentionally write nothing: both files stay empty
  }
  HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
  // Delete the includes file out from under the reader...
  assertTrue(INCLUDES_FILE.delete());
  try {
    // ...so refreshing must surface the missing file.
    hfp.refresh();
    Assert.fail("Should throw FileNotFoundException");
  }
  catch (FileNotFoundException ex) {
    // expected
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// End-to-end parse of typical hosts files: comment lines, one-host lines,
// trailing '#' comments, repeated hosts, and multiple hosts on one line.
@Test public void testHostsFileReader() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("somehost1\n");
efw.write("#This-is-comment\n");
efw.write("somehost2\n");
// Trailing comment: "host3" must not become a host.
efw.write("somehost3 # host3\n");
efw.write("somehost4\n");
// Duplicate somehost4 plus a second host on the same line.
efw.write("somehost4 somehost5\n");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("somehost1\n");
ifw.write("somehost2\n");
ifw.write("somehost3\n");
ifw.write("#This-is-comment\n");
// Trailing comment: "host4" must not become a host.
ifw.write("somehost4 # host4\n");
ifw.write("somehost4 somehost5\n");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// Five distinct hosts each (the duplicate somehost4 collapses).
assertEquals(5,includesLen);
assertEquals(5,excludesLen);
assertTrue(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getHosts().contains("host3"));
assertTrue(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("host4"));
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Inserts 1000 identity-keyed entries, verifies visitAll sees only inserted
// keys, removes everything, and checks the final emptiness and capacity.
@Test(timeout=60000) public void testAdditionsAndRemovals(){
// Start from capacity 0 to exercise table growth as well.
IdentityHashStore store=new IdentityHashStore(0);
final int NUM_KEYS=1000;
LOG.debug("generating " + NUM_KEYS + " keys");
final List keys=new ArrayList(NUM_KEYS);
for (int i=0; i < NUM_KEYS; i++) {
keys.add(new Key("key " + i));
}
for (int i=0; i < NUM_KEYS; i++) {
store.put(keys.get(i),i);
}
// Every visited key must be one of the keys we inserted.
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
Assert.assertTrue(keys.contains(k));
}
}
);
// Removing by (identity) key hands back the value stored for it.
for (int i=0; i < NUM_KEYS; i++) {
Assert.assertEquals(Integer.valueOf(i),store.remove(keys.get(i)));
}
// After removing everything, nothing may be visited.
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
Assert.fail("expected all entries to be removed");
}
}
);
Assert.assertTrue("expected the store to be " + "empty, but found " + store.numElements() + " elements.",store.isEmpty());
// Capacity grows by doubling: 1000 entries end up in a 1024 table, and
// removals do not shrink it.
Assert.assertEquals(1024,store.capacity());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testStartingWithZeroCapacity(){
  // A zero-capacity store must behave as empty and still grow on demand.
  IdentityHashStore store = new IdentityHashStore(0);
  store.visitAll(new Visitor(){
    @Override
    public void accept(Key k, Integer v){
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  final Key key1 = new Key("key1");
  // FIX: new Integer(...) is deprecated; Integer.valueOf is the
  // recommended replacement and equality here is by equals() anyway.
  Integer value1 = Integer.valueOf(100);
  store.put(key1, value1);
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  // The sole visited key must be the one we inserted.
  store.visitAll(new Visitor(){
    @Override
    public void accept(Key k, Integer v){
      Assert.assertEquals(key1, k);
    }
  });
  // remove() hands back the stored value and empties the store again.
  Assert.assertEquals(value1, store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testDuplicateInserts(){
  IdentityHashStore store = new IdentityHashStore(4);
  store.visitAll(new Visitor(){
    @Override
    public void accept(Key k, Integer v){
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  Key key1 = new Key("key1");
  // FIX: new Integer(...) is deprecated; Integer.valueOf is the
  // recommended replacement. Lookups are keyed on KEY identity, so
  // value identity does not matter here.
  Integer value1 = Integer.valueOf(100);
  Integer value2 = Integer.valueOf(200);
  Integer value3 = Integer.valueOf(300);
  store.put(key1, value1);
  // An equal-but-not-identical key must NOT match: the store uses
  // reference identity, not equals().
  Key equalToKey1 = new Key("key1");
  Assert.assertNull(store.get(equalToKey1));
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  // Duplicate puts on the same key accumulate separate entries.
  store.put(key1, value2);
  store.put(key1, value3);
  final List allValues = new LinkedList();
  store.visitAll(new Visitor(){
    @Override
    public void accept(Key k, Integer v){
      allValues.add(v);
    }
  });
  Assert.assertEquals(3, allValues.size());
  // Each remove() pops one of the three stored values.
  for (int i = 0; i < 3; i++) {
    Integer value = store.remove(key1);
    Assert.assertTrue(allValues.remove(value));
  }
  Assert.assertNull(store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}
BooleanVerifier
@Test public void testExpandedClasspath() throws Exception {
  // The test's own class is loaded from a directory on the classpath, so
  // JarFinder must synthesize a jar for it; the returned path must exist.
  File jarOnDisk = new File(JarFinder.getJar(TestJarFinder.class));
  Assert.assertTrue(jarOnDisk.exists());
}
BooleanVerifier
@Test public void testJar() throws Exception {
  // LogFactory is loaded from a real jar; getJar must return its path,
  // and that path must exist on disk.
  File jarOnDisk = new File(JarFinder.getJar(LogFactory.class));
  Assert.assertTrue(jarOnDisk.exists());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
// Fills a GSet with random values, removes every element above the mean via
// Iterator.remove(), and verifies only values <= the mean survive.
@Test(timeout=60000) public void testRemoveSomeViaIterator(){
ArrayList list=getRandomList(100,123);
LightWeightGSet set=new LightWeightGSet(16);
for ( Integer i : list) {
set.put(new TestElement(i));
}
// Compute the integer mean ("mode" here) of all stored values.
long sum=0;
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
sum+=iter.next().getVal();
}
long mode=sum / set.size();
LOG.info("Removing all elements above " + mode);
// Remove during iteration through the iterator itself, which is the
// only safe way to mutate the set mid-traversal.
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
int item=iter.next().getVal();
if (item > mode) {
iter.remove();
}
}
// Everything that survived must be at or below the mean.
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
Assert.assertTrue(iter.next().getVal() <= mode);
}
}
BooleanVerifier
/**
 * Backward-compatibility check for ReflectionUtils with JobConfigurable
 * objects: configure() must run only when the supplied configuration is
 * a JobConf, not a plain Configuration.
 * This should be made deprecated along with the mapred package HADOOP-1230.
 * Should be removed when mapred package is removed.
 */
@Test public void testSetConf(){
  JobConfigurableOb target = new JobConfigurableOb();
  // A plain Configuration must NOT trigger JobConfigurable.configure().
  ReflectionUtils.setConf(target, new Configuration());
  assertFalse(target.configured);
  // A JobConf must trigger it.
  ReflectionUtils.setConf(target, new JobConf());
  assertTrue(target.configured);
}
BooleanVerifier
@Test public void testRunjar() throws Throwable {
  // Make sure no stale output file survives from a previous run.
  File outFile = new File(TEST_ROOT_DIR, "out");
  if (outFile.exists()) {
    outFile.delete();
  }
  // Invoke RunJar the way the command line would: jar path, main class,
  // and the output file the Hello class is expected to create.
  File testJar = makeTestJar();
  String[] runJarArgs = {
      testJar.getAbsolutePath(),
      "org.apache.hadoop.util.Hello",
      outFile.toString()
  };
  RunJar.main(runJarArgs);
  Assert.assertTrue("RunJar failed", outFile.exists());
}
InternalCallVerifier BooleanVerifier
// Boundary checks for CIDR matching; the expectations imply CIDR_LIST covers
// 10.222.0.0/16 and 10.241.23.0/24 (the constant itself is declared elsewhere).
@Test public void testCIDRs(){
MachineList ml=new MachineList(CIDR_LIST);
// Just below the /16 range.
assertFalse(ml.includes("10.221.255.255"));
// Boundaries and interior of the /16 range.
assertTrue(ml.includes("10.222.0.0"));
assertTrue(ml.includes("10.222.0.1"));
assertTrue(ml.includes("10.222.0.255"));
assertTrue(ml.includes("10.222.255.0"));
assertTrue(ml.includes("10.222.255.254"));
assertTrue(ml.includes("10.222.255.255"));
// Just above the /16 range.
assertFalse(ml.includes("10.223.0.0"));
// Boundaries and interior of the /24 range.
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.1"));
assertTrue(ml.includes("10.241.23.254"));
assertTrue(ml.includes("10.241.23.255"));
// An address outside every listed range.
assertFalse(ml.includes("10.119.103.111"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Matching by host name with a mocked resolver: 1.2.3.4 reverse-resolves to
// a name NOT in the list, but forward resolution of the listed "host4" still
// maps to 1.2.3.4, so the address is accepted; 1.2.3.5 matches nothing.
@Test public void testHostNames() throws UnknownHostException {
InetAddress addressHost1=InetAddress.getByName("1.2.3.1");
InetAddress addressHost4=InetAddress.getByName("1.2.3.4");
// Reverse lookup of 1.2.3.4 yields a name absent from HOST_LIST.
InetAddress addressMockHost4=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("differentName");
InetAddress addressMockHost5=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
// Forward resolution: listed names map to fixed addresses.
MachineList.InetAddressFactory addressFactory=Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml=new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST),addressFactory);
// Accepted via forward resolution of "host4"; 1.2.3.5 matches nothing.
assertTrue(ml.includes("1.2.3.4"));
assertFalse(ml.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testStaticIPHostNameList() throws UnknownHostException {
  // Only forward resolution is stubbed: the listed host names resolve to
  // fixed addresses, and no reverse lookups are available.
  InetAddress host1Address = InetAddress.getByName("1.2.3.1");
  InetAddress host4Address = InetAddress.getByName("1.2.3.4");
  MachineList.InetAddressFactory resolver = Mockito.mock(MachineList.InetAddressFactory.class);
  Mockito.when(resolver.getByName("host1")).thenReturn(host1Address);
  Mockito.when(resolver.getByName("host4")).thenReturn(host4Address);
  MachineList machines = new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST), resolver);
  // 1.2.3.4 matches via the statically-resolved "host4"; 1.2.3.5 does not.
  assertTrue(machines.includes("1.2.3.4"));
  assertFalse(machines.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Matching via reverse DNS: 1.2.3.4 reverse-resolves to "host4", which is in
// the list, so the address is accepted; 1.2.3.5 reverse-resolves to "host5",
// which is not listed.
@Test public void testHostNamesReverserIpMatch() throws UnknownHostException {
InetAddress addressHost1=InetAddress.getByName("1.2.3.1");
InetAddress addressHost4=InetAddress.getByName("1.2.3.4");
// Reverse lookup of 1.2.3.4 yields the listed name "host4".
InetAddress addressMockHost4=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("host4");
InetAddress addressMockHost5=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
MachineList.InetAddressFactory addressFactory=Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml=new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST),addressFactory);
assertTrue(ml.includes("1.2.3.4"));
assertFalse(ml.includes("1.2.3.5"));
}
InternalCallVerifier BooleanVerifier
@Test public void testCIDRWith16bitmask(){
  final MachineList machines=new MachineList(CIDR_LIST1);
  // Every address inside the /16 block is accepted, including both edges.
  for ( String inside : new String[]{"10.222.0.0","10.222.0.1","10.222.0.255","10.222.255.0","10.222.255.254","10.222.255.255"}) {
    assertTrue(machines.includes(inside));
  }
  // Immediate neighbours of the block and an unrelated address are rejected.
  for ( String outside : new String[]{"10.221.255.255","10.223.0.0","10.119.103.111"}) {
    assertFalse(machines.includes(outside));
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testCIDRWith8BitMask(){
  final MachineList machines=new MachineList(CIDR_LIST2);
  // The whole /24 range is accepted, including both edges.
  for ( String inside : new String[]{"10.241.23.0","10.241.23.1","10.241.23.254","10.241.23.255"}) {
    assertTrue(machines.includes(inside));
  }
  // Addresses just outside the /24 and an unrelated address are rejected.
  for ( String outside : new String[]{"10.241.22.255","10.241.24.0","10.119.103.111"}) {
    assertFalse(machines.includes(outside));
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testIPandCIDRs(){
  // A list mixing single IPs with CIDR ranges honours both entry kinds.
  final MachineList machines=new MachineList(IP_CIDR_LIST);
  for ( String accepted : new String[]{"10.119.103.112","10.222.0.0","10.222.255.255","10.241.23.0","10.241.23.255"}) {
    assertTrue(machines.includes(accepted));
  }
  for ( String rejected : new String[]{"10.119.103.111","10.221.255.255","10.223.0.0","10.241.22.255","10.241.24.0"}) {
    assertFalse(machines.includes(rejected));
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testIPList(){
  // A plain IP list matches only the exact addresses it contains.
  final MachineList machines=new MachineList(IP_LIST);
  assertTrue(machines.includes("10.119.103.112"));
  assertFalse(machines.includes("10.119.103.111"));
}
InternalCallVerifier BooleanVerifier
@Test public void testHostNameIPandCIDRs(){
  // A list mixing hostnames, single IPs and CIDR ranges: the IP and CIDR
  // entries must behave exactly as they do in the homogeneous lists.
  final MachineList machines=new MachineList(HOSTNAME_IP_CIDR_LIST);
  for ( String accepted : new String[]{"10.119.103.112","10.222.0.0","10.222.255.255","10.241.23.0","10.241.23.255"}) {
    assertTrue(machines.includes(accepted));
  }
  for ( String rejected : new String[]{"10.119.103.111","10.221.255.255","10.223.0.0","10.241.22.255","10.241.24.0"}) {
    assertFalse(machines.includes(rejected));
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testWildCard(){
  // The "*" entry accepts any address at all.
  final MachineList acceptAll=new MachineList("*");
  assertTrue(acceptAll.includes("10.119.103.112"));
  assertTrue(acceptAll.includes("1.2.3.4"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetCollection(){
  MachineList ml=new MachineList(HOSTNAME_IP_CIDR_LIST);
  // Fix: parameterize the previously-raw Collection and query the
  // already-fetched collection for its size instead of re-fetching it.
  Collection<String> col=ml.getCollection();
  assertEquals(7,col.size());
  // Every trimmed entry of the original list must survive in the collection.
  for ( String item : StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
    assertTrue(col.contains(item));
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testIPListSpaces(){
  // Whitespace around entries must not affect matching.
  final MachineList machines=new MachineList(IP_LIST_SPACES);
  assertTrue(machines.includes("10.119.103.112"));
  assertFalse(machines.includes("10.119.103.111"));
}
BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test public void testNativeCodeLoaded(){
  // Fix: use the idiomatic boolean negation instead of "== false".
  if (!requireTestJni()) {
    LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
    return;
  }
  if (!NativeCodeLoader.isNativeCodeLoaded()) {
    fail("TestNativeCodeLoader: libhadoop.so testing was required, but " + "libhadoop.so was not loaded.");
  }
  // Each native component must report a non-empty library name once loaded.
  assertFalse(NativeCodeLoader.getLibraryName().isEmpty());
  assertFalse(ZlibFactory.getLibraryName().isEmpty());
  if (NativeCodeLoader.buildSupportsSnappy()) {
    assertFalse(SnappyCodec.getLibraryName().isEmpty());
  }
  if (NativeCodeLoader.buildSupportsOpenssl()) {
    assertFalse(OpensslCipher.getLibraryName().isEmpty());
  }
  assertFalse(Lz4Codec.getLibraryName().isEmpty());
  LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
}
APIUtilityVerifier BooleanVerifier
@Test public void testRpcClientId(){
  // The client id placed in the RPC request header must round-trip unchanged.
  final byte[] clientId=ClientId.getClientId();
  final RpcRequestHeaderProto header=ProtoUtil.makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,OperationProto.RPC_FINAL_PACKET,0,RpcConstants.INVALID_RETRY_COUNT,clientId);
  assertTrue(Arrays.equals(clientId,header.getClientId().toByteArray()));
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Loads the same class through many throwaway class loaders and checks the
 * ReflectionUtils constructor cache does not retain an entry per loader.
 */
@Test public void testCacheDoesntLeak() throws Exception {
  // Fix: use Class<?> instead of a raw Class, which also makes the
  // blanket @SuppressWarnings("unchecked") unnecessary.
  int iterations=9999;
  for (int i=0; i < iterations; i++) {
    URLClassLoader loader=new URLClassLoader(new URL[0],getClass().getClassLoader());
    Class<?> cl=Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild",false,loader);
    Object o=ReflectionUtils.newInstance(cl,null);
    assertEquals(cl,o.getClass());
  }
  // Best-effort GC so collected loaders can drop out of the cache before
  // the size check.
  System.gc();
  assertTrue(cacheSize() + " too big",cacheSize() < iterations);
}
BooleanVerifier
/**
 * Verifies that the "including inherited" reflection helpers surface both
 * the members declared on a subclass and those inherited from its parent.
 */
@Test public void testGetDeclaredFieldsIncludingInherited(){
  // Anonymous subclass adds one field and one accessor on top of Parent.
  Parent child=new Parent(){
    private int childField;
    @SuppressWarnings("unused") public int getChildField(){
      return childField;
    }
  }
  ;
  // Fix: parameterize the previously-raw List types.
  List<Field> fields=ReflectionUtils.getDeclaredFieldsIncludingInherited(child.getClass());
  boolean containsParentField=false;
  boolean containsChildField=false;
  for ( Field field : fields) {
    if (field.getName().equals("parentField")) {
      containsParentField=true;
    }
    else if (field.getName().equals("childField")) {
      containsChildField=true;
    }
  }
  List<Method> methods=ReflectionUtils.getDeclaredMethodsIncludingInherited(child.getClass());
  boolean containsParentMethod=false;
  boolean containsChildMethod=false;
  for ( Method method : methods) {
    if (method.getName().equals("getParentField")) {
      containsParentMethod=true;
    }
    else if (method.getName().equals("getChildField")) {
      containsChildMethod=true;
    }
  }
  assertTrue("Missing parent field",containsParentField);
  assertTrue("Missing child field",containsChildField);
  assertTrue("Missing parent method",containsParentMethod);
  assertTrue("Missing child method",containsChildMethod);
}
BooleanVerifier
/**
 * Verifies that by default {@link RunJar#unJar} unpacks every entry
 * in the archive.
 */
@Test public void testUnJar() throws Exception {
  final File destination=new File(TEST_ROOT_DIR,"unjar-all");
  final File foobar=new File(destination,"foobar.txt");
  final File foobaz=new File(destination,"foobaz.txt");
  // Sanity: the destination must start out empty.
  assertFalse("unjar dir shouldn't exist at test start",foobar.exists());
  RunJar.unJar(new File(TEST_ROOT_DIR,TEST_JAR_NAME),destination);
  assertTrue("foobar unpacked",foobar.exists());
  assertTrue("foobaz unpacked",foobaz.exists());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises add/remove/ordering of shutdown hooks: the singleton manager
 * starts empty, registering and unregistering is reflected by
 * hasShutdownHook(), and getShutdownHooksInOrder() returns hooks sorted
 * by descending priority.
 */
@Test public void shutdownHookManager(){
// The manager is a process-wide singleton.
ShutdownHookManager mgr=ShutdownHookManager.get();
Assert.assertNotNull(mgr);
// No hooks registered yet at test start.
Assert.assertEquals(0,mgr.getShutdownHooksInOrder().size());
// Two no-op hooks; only their identity and priority matter here.
Runnable hook1=new Runnable(){
@Override public void run(){
}
}
;
Runnable hook2=new Runnable(){
@Override public void run(){
}
}
;
// Register hook1 at priority 0 and verify it is visible and ordered.
mgr.addShutdownHook(hook1,0);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook1,mgr.getShutdownHooksInOrder().get(0));
// Removing the hook makes it invisible again.
mgr.removeShutdownHook(hook1);
Assert.assertFalse(mgr.hasShutdownHook(hook1));
// Re-adding after removal restores it as the single registered hook.
mgr.addShutdownHook(hook1,0);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
// hook2 has the higher priority (1 > 0), so it must come first in order.
mgr.addShutdownHook(hook2,1);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertTrue(mgr.hasShutdownHook(hook2));
Assert.assertEquals(2,mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook2,mgr.getShutdownHooksInOrder().get(0));
Assert.assertEquals(hook1,mgr.getShutdownHooksInOrder().get(1));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=3000) public void testShutdownThread(){
  // shutdownThread must both stop the thread and report that outcome.
  final Thread worker=new Thread(sampleRunnable);
  worker.start();
  final boolean reported=ShutdownThreadsHelper.shutdownThread(worker);
  final boolean terminated=!worker.isAlive();
  assertEquals("Incorrect return value",reported,terminated);
  assertTrue("Thread is not shutdown",terminated);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testShutdownThreadPool() throws InterruptedException {
  // shutdownExecutorService must terminate the pool and report that outcome.
  final ScheduledThreadPoolExecutor pool=new ScheduledThreadPoolExecutor(1);
  pool.execute(sampleRunnable);
  final boolean reported=ShutdownThreadsHelper.shutdownExecutorService(pool);
  final boolean terminated=pool.isTerminated();
  assertEquals("Incorrect return value",reported,terminated);
  assertTrue("ExecutorService is not shutdown",terminated);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetUniqueNonEmptyTrimmedStrings(){
  // Input with duplicates, surrounding spaces and empty segments.
  final String TO_SPLIT=",foo, bar,baz,,blah,blah,bar,";
  // Fix: parameterize the previously-raw Collection and drop the redundant
  // explicit String[] allocation inside Arrays.asList.
  Collection<String> col=StringUtils.getTrimmedStringCollection(TO_SPLIT);
  // Duplicates and empty segments are removed, values are trimmed.
  assertEquals(4,col.size());
  assertTrue(col.containsAll(Arrays.asList("foo","bar","baz","blah")));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validates "winutils ls" output, both the default and the "-F" (field
 * separated) format, against a freshly written file.
 */
@Test(timeout=30000) public void testLs() throws IOException {
  final String content="6bytes";
  final int contentSize=content.length();
  File testFile=new File(TEST_DIR,"file1");
  writeFile(testFile,content);
  String output=Shell.execCommand(Shell.WINUTILS,"ls",testFile.getCanonicalPath());
  String[] outputArgs=output.split("[ \r\n]");
  // Fix: use assertEquals instead of assertTrue(a.equals(b)) so a failure
  // reports the mismatched values.
  assertEquals("-rwx------",outputArgs[0]);
  assertEquals(testFile.getCanonicalPath(),outputArgs[outputArgs.length - 1]);
  // "-F" output is "|"-separated: 9 fields with size at index 4 and path last.
  output=Shell.execCommand(Shell.WINUTILS,"ls","-F",testFile.getCanonicalPath());
  outputArgs=output.split("[|\r\n]");
  assertEquals(9,outputArgs.length);
  assertEquals("-rwx------",outputArgs[0]);
  assertEquals(contentSize,Long.parseLong(outputArgs[4]));
  assertEquals(testFile.getCanonicalPath(),outputArgs[8]);
  testFile.delete();
  assertFalse(testFile.exists());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testSymlinkRejectsForwardSlashesInLink() throws IOException {
  // Build a valid target but a link path using forward slashes, which
  // "winutils symlink" must reject.
  final File targetFile=new File(TEST_DIR,"file");
  assertTrue(targetFile.createNewFile());
  final String target=targetFile.getPath();
  final String link=new File(TEST_DIR,"link").getPath().replaceAll("\\\\","/");
  try {
    Shell.execCommand(Shell.WINUTILS,"symlink",link,target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in link: link = %s, target = %s",link,target));
  }
  catch ( IOException e) {
    LOG.info("Expected: Failed to create symlink with forward slashes in target");
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate behavior of chmod commands on directories on Windows.
 */
@Test(timeout=30000) public void testBasicChmodOnDir() throws IOException {
  // Layout: directory a/ containing file b.
  File a=new File(TEST_DIR,"a");
  File b=new File(a,"b");
  a.mkdirs();
  assertTrue(b.createNewFile());
  // 300 = -wx------ : without read permission, File.list() returns null.
  chmod("300",a);
  String[] files=a.list();
  assertTrue("Listing a directory without read permission should fail",files == null);
  chmod("700",a);
  files=a.list();
  assertEquals("b",files[0]);
  // 500 = r-x------ : no write permission on the directory.
  chmod("500",a);
  File c=new File(a,"c");
  try {
    c.createNewFile();
    // Fix: report the missing exception with fail() instead of the
    // always-failing assertFalse(msg, true) idiom.
    fail("writeFile should have failed!");
  }
  catch ( IOException ex) {
    LOG.info("Expected: Failed to create a file when directory " + "permissions are 577");
  }
  assertTrue("Special behavior: deleting a file will succeed on Windows " + "even if a user does not have write permissions on the parent dir",b.delete());
  assertFalse("Renaming a file should fail on the dir where a user does " + "not have write permissions",b.renameTo(new File(a,"d")));
  chmod("700",a);
  assertTrue(c.createNewFile());
  File d=new File(a,"d");
  assertTrue(c.renameTo(d));
  // 600 = rw------- : listing still works without the execute bit.
  chmod("600",a);
  files=a.list();
  assertEquals("d",files[0]);
  assertTrue(d.delete());
  File e=new File(a,"e");
  assertTrue(e.createNewFile());
  assertTrue(e.renameTo(new File(a,"f")));
  chmod("700",a);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testSymlinkRejectsForwardSlashesInTarget() throws IOException {
  // Build a valid link path but a target using forward slashes, which
  // "winutils symlink" must reject.
  final File targetFile=new File(TEST_DIR,"file");
  assertTrue(targetFile.createNewFile());
  final String target=targetFile.getPath().replaceAll("\\\\","/");
  final String link=new File(TEST_DIR,"link").getPath();
  try {
    Shell.execCommand(Shell.WINUTILS,"symlink",link,target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in target: link = %s, target = %s",link,target));
  }
  catch ( IOException e) {
    LOG.info("Expected: Failed to create symlink with forward slashes in target");
  }
}
BooleanVerifier
@Test(timeout=30000) public void testChown() throws IOException {
File a=new File(TEST_DIR,"a");
assertTrue(a.createNewFile());
String username=System.getProperty("user.name");
String qualifiedUsername=Shell.execCommand("whoami").trim();
String admins="Administrators";
String qualifiedAdmins="BUILTIN\\Administrators";
chown(username + ":" + admins,a);
assertOwners(a,qualifiedUsername,qualifiedAdmins);
chown(username,a);
chown(":" + admins,a);
assertOwners(a,qualifiedUsername,qualifiedAdmins);
chown(":" + admins,a);
chown(username + ":",a);
assertOwners(a,qualifiedUsername,qualifiedAdmins);
assertTrue(a.delete());
assertFalse(a.exists());
}
BooleanVerifier
/**
 * Validate behavior of chmod commands on regular files on Windows:
 * read, write and execute denial must each be enforced.
 */
@Test(timeout=30000) public void testBasicChmod() throws IOException {
  File a=new File(TEST_DIR,"a");
  a.createNewFile();
  // 377 = -wxrwxrwx : owner may not read.
  chmod("377",a);
  try {
    readFile(a);
    // Fix: report the missing exception with fail() instead of the
    // always-failing assertFalse(msg, true) idiom (three occurrences).
    fail("readFile should have failed!");
  }
  catch ( IOException ex) {
    LOG.info("Expected: Failed read from a file with permissions 377");
  }
  chmod("700",a);
  // 577 = r-xrwxrwx : owner may not write.
  chmod("577",a);
  try {
    writeFile(a,"test");
    fail("writeFile should have failed!");
  }
  catch ( IOException ex) {
    LOG.info("Expected: Failed write to a file with permissions 577");
  }
  chmod("700",a);
  assertTrue(a.delete());
  // 677 = rw-rwxrwx : owner may not execute; use a copy of winutils as a
  // known-runnable binary.
  File winutilsFile=new File(Shell.WINUTILS);
  File aExe=new File(TEST_DIR,"a.exe");
  FileUtils.copyFile(winutilsFile,aExe);
  chmod("677",aExe);
  try {
    Shell.execCommand(aExe.getCanonicalPath(),"ls");
    fail("executing " + aExe + " should have failed!");
  }
  catch ( IOException ex) {
    LOG.info("Expected: Failed to execute a file with permissions 677");
  }
  assertTrue(aExe.delete());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Validates "winutils readlink": it must echo the target of directory and
 * file symlinks, and exit with code 1 for anything that is not a symlink
 * or for malformed invocations.
 */
@Test(timeout=30000) public void testReadLink() throws IOException {
  File dir1=new File(TEST_DIR,"dir1");
  assertTrue(dir1.mkdirs());
  File file1=new File(dir1,"file1.txt");
  assertTrue(file1.createNewFile());
  File dirLink=new File(TEST_DIR,"dlink");
  File fileLink=new File(TEST_DIR,"flink");
  Shell.execCommand(Shell.WINUTILS,"symlink",dirLink.toString(),dir1.toString());
  Shell.execCommand(Shell.WINUTILS,"symlink",fileLink.toString(),file1.toString());
  // readlink on a valid symlink prints the target path.
  String readLinkOutput=Shell.execCommand(Shell.WINUTILS,"readlink",dirLink.toString());
  assertThat(readLinkOutput,equalTo(dir1.toString()));
  readLinkOutput=Shell.execCommand(Shell.WINUTILS,"readlink",fileLink.toString());
  assertThat(readLinkOutput,equalTo(file1.toString()));
  // Fix: the five copy-pasted try/catch blocks are collapsed into one
  // helper that asserts exit code 1.
  expectReadlinkExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink","");
  expectReadlinkExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink","ThereIsNoSuchLink");
  expectReadlinkExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink",dir1.toString());
  expectReadlinkExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink",file1.toString());
  expectReadlinkExitCode1("Failed to get Shell.ExitCodeException with bad parameters","a","b");
}

/**
 * Runs "winutils readlink" with the given arguments and asserts it fails
 * with exit code 1; fails the test with {@code failureMessage} otherwise.
 */
private void expectReadlinkExitCode1(String failureMessage,String... readlinkArgs) throws IOException {
  String[] cmd=new String[readlinkArgs.length + 2];
  cmd[0]=Shell.WINUTILS;
  cmd[1]="readlink";
  System.arraycopy(readlinkArgs,0,cmd,2,readlinkArgs.length);
  try {
    Shell.execCommand(cmd);
    fail(failureMessage);
  }
  catch ( Shell.ExitCodeException ece) {
    assertThat(ece.getExitCode(),is(1));
  }
}
BooleanVerifier
@Test public void testEmptyAuth(){
  // An empty auth specification parses to an empty list.
  final List parsed=ZKUtil.parseAuth("");
  assertTrue(parsed.isEmpty());
}
BooleanVerifier
@Test public void testEmptyACL(){
  // An empty ACL specification parses to an empty list.
  final List parsed=ZKUtil.parseACLs("");
  assertTrue(parsed.isEmpty());
}
BooleanVerifier
@Test public void testNullACL(){
  // A null ACL specification is treated the same as an empty one.
  final List parsed=ZKUtil.parseACLs(null);
  assertTrue(parsed.isEmpty());
}
UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testConfIndirection() throws IOException {
  // Null and plain values pass straight through unchanged.
  assertNull(ZKUtil.resolveConfIndirection(null));
  assertEquals("x",ZKUtil.resolveConfIndirection("x"));
  // An "@path" value resolves to the contents of the referenced file.
  TEST_FILE.getParentFile().mkdirs();
  Files.write("hello world",TEST_FILE,Charsets.UTF_8);
  assertEquals("hello world",ZKUtil.resolveConfIndirection("@" + TEST_FILE.getAbsolutePath()));
  // A reference to a missing file fails with an exception naming that file.
  try {
    ZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
    fail("Did not throw for non-existent file reference");
  }
  catch ( FileNotFoundException fnfe) {
    assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
  }
}
BooleanVerifier
@Test public void testNullAuth(){
  // A null auth specification is treated the same as an empty one.
  final List parsed=ZKUtil.parseAuth(null);
  assertTrue(parsed.isEmpty());
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises CountingBloomFilter: membership and approximate counts must
 * track add/delete operations, then the common strategy suite is run.
 */
@Test public void testCountingBloomFilter(){
  int hashId=Hash.JENKINS_HASH;
  CountingBloomFilter filter=new CountingBloomFilter(bitSize,hashFunctionNumber,hashId);
  Key key=new Key(new byte[]{48,48});
  filter.add(key);
  assertTrue("CountingBloomFilter.membership error ",filter.membershipTest(key));
  // Fix: use assertEquals instead of assertTrue(msg, x == n) so a failure
  // reports the actual count (three occurrences).
  assertEquals("CountingBloomFilter.approximateCount error",1,filter.approximateCount(key));
  filter.add(key);
  assertEquals("CountingBloomFilter.approximateCount error",2,filter.approximateCount(key));
  // One delete leaves the key present; the second removes it entirely.
  filter.delete(key);
  assertTrue("CountingBloomFilter.membership error ",filter.membershipTest(key));
  filter.delete(key);
  assertFalse("CountingBloomFilter.membership error ",filter.membershipTest(key));
  assertEquals("CountingBloomFilter.approximateCount error",0,filter.approximateCount(key));
  BloomFilterCommonTester.of(hashId,numInsertions).withFilterInstance(filter).withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,BloomFilterTestStrategy.ADD_KEYS_STRATEGY,BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,BloomFilterTestStrategy.WRITE_READ_STRATEGY,BloomFilterTestStrategy.FILTER_OR_STRATEGY,BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Exercises Hash type parsing, instance selection via configuration and
 * constants, and determinism of repeated hashing.
 * NOTE(review): the == comparisons against getInstance() results assume
 * each hash implementation is a cached singleton — the assertions only
 * hold under that implementation detail; confirm against Hash sources.
 */
@Test public void testHash(){
int iterations=30;
// parseHashType maps the known names and anything else to INVALID_HASH.
assertTrue("testHash jenkins error !!!",Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
assertTrue("testHash murmur error !!!",Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
assertTrue("testHash undefined",Hash.INVALID_HASH == Hash.parseHashType("undefined"));
// getInstance(conf) honours the "hadoop.util.hash.type" setting...
Configuration cfg=new Configuration();
cfg.set("hadoop.util.hash.type","murmur");
assertTrue("testHash",MurmurHash.getInstance() == Hash.getInstance(cfg));
cfg=new Configuration();
cfg.set("hadoop.util.hash.type","jenkins");
assertTrue("testHash jenkins configuration error !!!",JenkinsHash.getInstance() == Hash.getInstance(cfg));
// ...and falls back to murmur when the key is unset.
cfg=new Configuration();
assertTrue("testHash undefine configuration error !!!",MurmurHash.getInstance() == Hash.getInstance(cfg));
// getInstance(int) selects by constant; INVALID_HASH yields null.
assertTrue("testHash error jenkin getInstance !!!",JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
assertTrue("testHash error murmur getInstance !!!",MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
assertNull("testHash error invalid getInstance !!!",Hash.getInstance(Hash.INVALID_HASH));
// Repeated hashing of the same input must be deterministic, both for the
// full-length overload and the length-limited (67-byte) overload.
int murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes()));
}
murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67);
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67));
}
int jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation jenkins hash error !!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes()));
}
jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67);
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation jenkins hash error !!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67));
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Starts a ContainerManagementProtocol server and calls it through an
 * ApplicationClientProtocol proxy; the mismatched method must fail with a
 * YarnException whose message names the unknown method and the protocol.
 * NOTE(review): the trailing catch (Exception) only prints the stack
 * trace without failing the test — presumably deliberate leniency for
 * non-Yarn transport errors, but worth confirming.
 */
@Test public void testUnknownCall(){
Configuration conf=new Configuration();
conf.set(YarnConfiguration.IPC_RPC_IMPL,HadoopYarnProtoRPC.class.getName());
YarnRPC rpc=YarnRPC.create(conf);
// Port 0: let the OS pick a free port for the test server.
String bindAddr="localhost:0";
InetSocketAddress addr=NetUtils.createSocketAddr(bindAddr);
Server server=rpc.getServer(ContainerManagementProtocol.class,new DummyContainerManager(),addr,conf,null,1);
server.start();
// Proxy speaks a *different* protocol than the one the server registered.
ApplicationClientProtocol proxy=(ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class,NetUtils.getConnectAddress(server),conf);
try {
proxy.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
Assert.fail("Excepted RPC call to fail with unknown method.");
}
catch ( YarnException e) {
// The server must identify the unknown method and the protocol involved.
Assert.assertTrue(e.getMessage().matches("Unknown method getNewApplication called on.*" + "org.apache.hadoop.yarn.proto.ApplicationClientProtocol" + "\\$ApplicationClientProtocolService\\$BlockingInterface protocol."));
}
catch ( Exception e) {
e.printStackTrace();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * registerNodeManager must return a non-null response on success, and
 * surface the server-side test exception as a YarnException whose message
 * starts with "testMessage".
 */
@Test public void testResourceTrackerPBClientImpl() throws Exception {
  final RegisterNodeManagerRequest request=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
  assertNotNull(client.registerNodeManager(request));
  // Flip the server stub into its error mode and verify propagation.
  ResourceTrackerTestImpl.exception=true;
  try {
    client.registerNodeManager(request);
    fail("there should be YarnException");
  }
  catch ( YarnException e) {
    assertTrue(e.getMessage().startsWith("testMessage"));
  }
  finally {
    // Always restore the stub so later tests see the normal behavior.
    ResourceTrackerTestImpl.exception=false;
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * nodeHeartbeat must return a non-null response on success, and surface
 * the server-side test exception as a YarnException whose message starts
 * with "testMessage".
 */
@Test public void testNodeHeartbeat() throws Exception {
  final NodeHeartbeatRequest request=recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
  assertNotNull(client.nodeHeartbeat(request));
  // Flip the server stub into its error mode and verify propagation.
  ResourceTrackerTestImpl.exception=true;
  try {
    client.nodeHeartbeat(request);
    fail("there should be YarnException");
  }
  catch ( YarnException e) {
    assertTrue(e.getMessage().startsWith("testMessage"));
  }
  finally {
    // Always restore the stub so later tests see the normal behavior.
    ResourceTrackerTestImpl.exception=false;
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A MasterKeyPBImpl rebuilt from its proto must preserve the key id and
 * satisfy the equals/hashCode contract against the original.
 */
@Test public void testMasterKeyPBImpl(){
  final MasterKeyPBImpl original=new MasterKeyPBImpl();
  original.setBytes(ByteBuffer.allocate(0));
  original.setKeyId(1);
  // Round-trip through the protobuf representation.
  final MasterKeyPBImpl roundTripped=new MasterKeyPBImpl(original.getProto());
  assertEquals(1,roundTripped.getKeyId());
  assertTrue(original.equals(roundTripped));
  assertEquals(original.hashCode(),roundTripped.hashCode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A SerializedExceptionPBImpl rebuilt from its proto must preserve the
 * message, the cause chain, and the remote stack trace text.
 */
@Test public void testSerializedExceptionPBImpl(){
  // Message-only round trip.
  SerializedExceptionPBImpl original=new SerializedExceptionPBImpl();
  original.init("testMessage");
  SerializedExceptionPBImpl roundTripped=new SerializedExceptionPBImpl(original.getProto());
  assertEquals("testMessage",roundTripped.getMessage());
  // Round trip with a nested cause chain.
  original=new SerializedExceptionPBImpl();
  original.init("testMessage",new Throwable(new Throwable("parent")));
  roundTripped=new SerializedExceptionPBImpl(original.getProto());
  assertEquals("testMessage",roundTripped.getMessage());
  assertEquals("parent",roundTripped.getCause().getMessage());
  assertTrue(roundTripped.getRemoteTrace().startsWith("java.lang.Throwable: java.lang.Throwable: parent"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises ApplicationAttemptId equality, ordering, hashing and its
 * string form. Attempts differ by (cluster timestamp, app id, attempt id).
 * NOTE(review): the hashCode *inequality* assertions assume distinct ids
 * never collide — not guaranteed by the hashCode contract, so these are
 * implementation-dependent.
 */
@Test public void testApplicationAttemptId(){
// a1/a5 are identical; a2-a4 each differ in one component.
ApplicationAttemptId a1=createAppAttemptId(10l,1,1);
ApplicationAttemptId a2=createAppAttemptId(10l,1,2);
ApplicationAttemptId a3=createAppAttemptId(10l,2,1);
ApplicationAttemptId a4=createAppAttemptId(8l,1,4);
ApplicationAttemptId a5=createAppAttemptId(10l,1,1);
Assert.assertTrue(a1.equals(a5));
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a3));
Assert.assertFalse(a1.equals(a4));
// compareTo is consistent with equals and orders by the id components.
Assert.assertTrue(a1.compareTo(a5) == 0);
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) < 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a5.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a4.hashCode());
// toString: "appattempt_<ts>_<appId 4+ digits>_<attempt 6+ digits>".
long ts=System.currentTimeMillis();
ApplicationAttemptId a6=createAppAttemptId(ts,543627,33492611);
Assert.assertEquals("appattempt_10_0001_000001",a1.toString());
Assert.assertEquals("appattempt_" + ts + "_543627_33492611",a6.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises ApplicationId equality, ordering, hashing and its string
 * form. Ids differ by (cluster timestamp, sequence number).
 * NOTE(review): the hashCode *inequality* assertions assume distinct ids
 * never collide — not guaranteed by the hashCode contract.
 */
@Test public void testApplicationId(){
// a1/a3 are identical; a2 differs in sequence, a4 in timestamp.
ApplicationId a1=ApplicationId.newInstance(10l,1);
ApplicationId a2=ApplicationId.newInstance(10l,2);
ApplicationId a3=ApplicationId.newInstance(10l,1);
ApplicationId a4=ApplicationId.newInstance(8l,3);
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a4));
Assert.assertTrue(a1.equals(a3));
// compareTo is consistent with equals.
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) == 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a2.hashCode() == a4.hashCode());
// toString: "application_<ts>_<sequence, 4+ digits zero-padded>".
long ts=System.currentTimeMillis();
ApplicationId a5=ApplicationId.newInstance(ts,45436343);
Assert.assertEquals("application_10_0001",a1.toString());
Assert.assertEquals("application_" + ts + "_45436343",a5.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises ContainerId equality, ordering, hashing and its string form.
 * Ids differ by (app id, attempt id, cluster timestamp, container id).
 * NOTE(review): the hashCode *inequality* assertions assume distinct ids
 * never collide — not guaranteed by the hashCode contract.
 */
@Test public void testContainerId(){
// c1/c3 are identical; c2, c4, c5 each vary one component.
ContainerId c1=newContainerId(1,1,10l,1);
ContainerId c2=newContainerId(1,1,10l,2);
ContainerId c3=newContainerId(1,1,10l,1);
ContainerId c4=newContainerId(1,3,10l,1);
ContainerId c5=newContainerId(1,3,8l,1);
Assert.assertTrue(c1.equals(c3));
Assert.assertFalse(c1.equals(c2));
Assert.assertFalse(c1.equals(c4));
Assert.assertFalse(c1.equals(c5));
// compareTo is consistent with equals and orders by the id components.
Assert.assertTrue(c1.compareTo(c3) == 0);
Assert.assertTrue(c1.compareTo(c2) < 0);
Assert.assertTrue(c1.compareTo(c4) < 0);
Assert.assertTrue(c1.compareTo(c5) > 0);
Assert.assertTrue(c1.hashCode() == c3.hashCode());
Assert.assertFalse(c1.hashCode() == c2.hashCode());
Assert.assertFalse(c1.hashCode() == c4.hashCode());
Assert.assertFalse(c1.hashCode() == c5.hashCode());
// toString: "container_<ts>_<app>_<attempt>_<container>" with zero padding
// for small values.
long ts=System.currentTimeMillis();
ContainerId c6=newContainerId(36473,4365472,ts,25645811);
Assert.assertEquals("container_10_0001_01_000001",c1.toString());
Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",c6.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testResourceIncreaseContext(){
  // Build an increase request with a recognizable token identifier...
  final byte[] identifier=new byte[]{1,2,3,4};
  final Token token=Token.newInstance(identifier,"","".getBytes(),"");
  final ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
  final Resource resource=Resource.newInstance(1023,3);
  ContainerResourceIncrease ctx=ContainerResourceIncrease.newInstance(containerId,resource,token);
  // ...round-trip it through its protobuf form...
  final ContainerResourceIncreaseProto proto=((ContainerResourceIncreasePBImpl)ctx).getProto();
  ctx=new ContainerResourceIncreasePBImpl(proto);
  // ...and verify every component survived.
  Assert.assertEquals(ctx.getCapability(),resource);
  Assert.assertEquals(ctx.getContainerId(),containerId);
  Assert.assertTrue(Arrays.equals(ctx.getContainerToken().getIdentifier().array(),identifier));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises NodeId equality, ordering, hashing and its "host:port" string
 * form. Ids differ by host and/or port.
 * NOTE(review): the hashCode *inequality* assertions assume distinct ids
 * never collide — not guaranteed by the hashCode contract.
 */
@Test public void testNodeId(){
// nodeId1/nodeId3 are identical; nodeId2 differs in host+port, nodeId4 in port.
NodeId nodeId1=NodeId.newInstance("10.18.52.124",8041);
NodeId nodeId2=NodeId.newInstance("10.18.52.125",8038);
NodeId nodeId3=NodeId.newInstance("10.18.52.124",8041);
NodeId nodeId4=NodeId.newInstance("10.18.52.124",8039);
Assert.assertTrue(nodeId1.equals(nodeId3));
Assert.assertFalse(nodeId1.equals(nodeId2));
Assert.assertFalse(nodeId3.equals(nodeId4));
// compareTo is consistent with equals; host compares before port.
Assert.assertTrue(nodeId1.compareTo(nodeId3) == 0);
Assert.assertTrue(nodeId1.compareTo(nodeId2) < 0);
Assert.assertTrue(nodeId3.compareTo(nodeId4) > 0);
Assert.assertTrue(nodeId1.hashCode() == nodeId3.hashCode());
Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());
Assert.assertEquals("10.18.52.124:8041",nodeId1.toString());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testDebugFlag() throws Exception {
  // A full distributed-shell run with --debug must still init and succeed.
  final String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1","--debug"};
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  Assert.assertTrue(client.init(args));
  LOG.info("Running DS Client");
  Assert.assertTrue(client.run());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testContainerLaunchFailureHandling() throws Exception {
  // Use an app master that deliberately fails container launches; the
  // overall client run must then report failure.
  final String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"};
  LOG.info("Initializing DS Client");
  final Client client=new Client(ContainerLaunchFailAppMaster.class.getName(),new Configuration(yarnCluster.getConfig()));
  final boolean initSuccess=client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  final boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  Assert.assertFalse(result);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies a distributed-shell app that keeps containers across attempts
 * still completes successfully after the app master restarts.
 */
@Test(timeout=90000) public void testDSRestartWithPreviousRunningContainers() throws Exception {
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_command","sleep 8","--master_memory","512","--container_memory","128","--keep_containers_across_application_attempts"};
  LOG.info("Initializing DS Client");
  Client client=new Client(TestDSFailedAppMaster.class.getName(),new Configuration(yarnCluster.getConfig()));
  // Fix: assert the init result instead of silently ignoring it, matching
  // the sibling distributed-shell tests.
  boolean initSuccess=client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  Assert.assertTrue(result);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that Client.init rejects each class of invalid argument set with
 * an IllegalArgumentException whose message names the specific problem.
 */
@Test(timeout=90000) public void testDSShellWithInvalidArgs() throws Exception {
  Client client=new Client(new Configuration(yarnCluster.getConfig()));
  LOG.info("Initializing DS Client with no args");
  expectInitFailure(client,new String[]{},"No args");
  LOG.info("Initializing DS Client with no jar file");
  expectInitFailure(client,new String[]{"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"},"No jar");
  LOG.info("Initializing DS Client with no shell command");
  expectInitFailure(client,new String[]{"--jar",APPMASTER_JAR,"--num_containers","2","--master_memory","512","--container_memory","128"},"No shell command");
  LOG.info("Initializing DS Client with invalid no. of containers");
  expectInitFailure(client,new String[]{"--jar",APPMASTER_JAR,"--num_containers","-1","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"},"Invalid no. of containers");
  LOG.info("Initializing DS Client with invalid no. of vcores");
  expectInitFailure(client,new String[]{"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","-2","--container_memory","128","--container_vcores","1"},"Invalid virtual cores specified");
  LOG.info("Initializing DS Client with --shell_command and --shell_script");
  expectInitFailure(client,new String[]{"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1","--shell_script","test.sh"},"Can not specify shell_command option " + "and shell_script option at the same time");
  LOG.info("Initializing DS Client without --shell_command and --shell_script");
  expectInitFailure(client,new String[]{"--jar",APPMASTER_JAR,"--num_containers","2","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"},"No shell command or shell script specified " + "to be executed by application master");
}
/**
 * Runs Client.init with deliberately invalid args and asserts it throws an
 * IllegalArgumentException containing the expected message fragment.
 * Extracted to remove seven copies of the same try/catch stanza.
 */
private void expectInitFailure(Client client,String[] args,String expectedFragment) throws Exception {
  try {
    client.init(args);
    Assert.fail("Exception is expected");
  }
  catch ( IllegalArgumentException e) {
    Assert.assertTrue("The throw exception is not expected",e.getMessage().contains(expectedFragment));
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs the distributed shell with a custom log4j properties file that sets
 * the root logger to DEBUG, and verifies DEBUG logging was actually enabled
 * by the run (it must be off beforehand and on afterwards).
 */
@Test(timeout=90000) public void testDSShellWithCustomLogPropertyFile() throws Exception {
  final File basedir=new File("target",TestDistributedShell.class.getName());
  final File tmpDir=new File(basedir,"tmpDir");
  tmpDir.mkdirs();
  final File customLogProperty=new File(tmpDir,"custom_log4j.properties");
  if (customLogProperty.exists()) {
    customLogProperty.delete();
  }
  if (!customLogProperty.createNewFile()) {
    Assert.fail("Can not create custom log4j property file.");
  }
  // Fixed: close the writer even if the write fails, so the file handle
  // is never leaked.
  PrintWriter fileWriter=new PrintWriter(customLogProperty);
  try {
    fileWriter.write("log4j.rootLogger=debug,stdout");
  }
  finally {
    fileWriter.close();
  }
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","3","--shell_command","echo","--shell_args","HADOOP","--log_properties",customLogProperty.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
  // DEBUG must be disabled before the run so the post-run checks prove the
  // custom properties file (not pre-existing config) turned it on.
  final Log LOG_Client=LogFactory.getLog(Client.class);
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertFalse(LOG_Client.isDebugEnabled());
  final Log LOG_AM=LogFactory.getLog(ApplicationMaster.class);
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertFalse(LOG_AM.isDebugEnabled());
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  Assert.assertTrue(client.init(args));
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  // Fixed: the run result was logged but never asserted.
  Assert.assertTrue(result);
  // Containers should now emit DEBUG lines; expect a meaningful count.
  Assert.assertTrue(verifyContainerLog(3,null,true,"DEBUG") > 10);
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertTrue(LOG_Client.isDebugEnabled());
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertTrue(LOG_AM.isDebugEnabled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Full distributed-shell run: the client runs on a background thread while
// this test polls the RM until the application report exposes the AM host
// and rpc port, then verifies the timeline store recorded the expected
// app-attempt and container entities.
@Test(timeout=90000) public void testDSShell() throws Exception {
String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// Run the client asynchronously so the test thread is free to poll YARN.
final AtomicBoolean result=new AtomicBoolean(false);
Thread t=new Thread(){
public void run(){
try {
result.set(client.run());
}
catch ( Exception e) {
throw new RuntimeException(e);
}
}
}
;
t.start();
YarnClient yarnClient=YarnClient.createYarnClient();
yarnClient.init(new Configuration(yarnCluster.getConfig()));
yarnClient.start();
String hostName=NetUtils.getHostname();
boolean verified=false;
String errorMessage="";
// Spin until the first app report carries a real host (not "N/A"); the AM
// is expected to register with this test's host name and rpc port -1.
while (!verified) {
List apps=yarnClient.getApplications();
if (apps.size() == 0) {
Thread.sleep(10);
continue;
}
ApplicationReport appReport=apps.get(0);
if (appReport.getHost().equals("N/A")) {
Thread.sleep(10);
continue;
}
errorMessage="Expected host name to start with '" + hostName + "', was '"+ appReport.getHost()+ "'. Expected rpc port to be '-1', was '"+ appReport.getRpcPort()+ "'.";
if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
verified=true;
}
// Bail out if the app already finished without the expected report.
if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
break;
}
}
Assert.assertTrue(errorMessage,verified);
// Wait for the background run to finish before asserting its result.
t.join();
LOG.info("Client run completed. Result=" + result);
Assert.assertTrue(result.get());
// One app attempt with two events (registered + finished) should be in the
// timeline store, plus one entity per launched container.
TimelineEntities entitiesAttempts=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entitiesAttempts);
Assert.assertEquals(1,entitiesAttempts.getEntities().size());
Assert.assertEquals(2,entitiesAttempts.getEntities().get(0).getEvents().size());
Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
TimelineEntities entities=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entities);
Assert.assertEquals(2,entities.getEntities().size());
Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_CONTAINER.toString());
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the distributed shell with a multi-word --shell_args value and
 * verifies every container echoed the full argument string.
 */
@Test(timeout=90000) public void testDSShellWithMultipleArgs() throws Exception {
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","4","--shell_command","echo","--shell_args","HADOOP YARN MAPREDUCE HDFS","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  Assert.assertTrue(client.init(args));
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  // Fixed: the run result was logged but never asserted.
  Assert.assertTrue(result);
  List expectedContent=new ArrayList();
  expectedContent.add("HADOOP YARN MAPREDUCE HDFS");
  verifyContainerLog(4,expectedContent,false,"");
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs the distributed shell with --shell_script instead of --shell_command
 * and verifies the container log contains the script's echo output.
 */
@Test(timeout=90000) public void testDSShellWithShellScript() throws Exception {
  final File basedir=new File("target",TestDistributedShell.class.getName());
  final File tmpDir=new File(basedir,"tmpDir");
  tmpDir.mkdirs();
  final File customShellScript=new File(tmpDir,"custom_script.sh");
  if (customShellScript.exists()) {
    customShellScript.delete();
  }
  if (!customShellScript.createNewFile()) {
    Assert.fail("Can not create custom shell script file.");
  }
  // Fixed: close the writer even if the write fails, so the file handle
  // is never leaked.
  PrintWriter fileWriter=new PrintWriter(customShellScript);
  try {
    fileWriter.write("echo testDSShellWithShellScript");
  }
  finally {
    fileWriter.close();
  }
  System.out.println(customShellScript.getAbsolutePath());
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_script",customShellScript.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  Assert.assertTrue(client.init(args));
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  // Fixed: the run result was logged but never asserted.
  Assert.assertTrue(result);
  List expectedContent=new ArrayList();
  expectedContent.add("testDSShellWithShellScript");
  verifyContainerLog(1,expectedContent,false,"");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Launches an unmanaged AM that exits with "success"; the overridden
// launchAM verifies the attempt is in LAUNCHED state at launch time, and
// the overall run must succeed.
@Test(timeout=30000) public void testUMALauncher() throws Exception {
String classpath=getTestRuntimeClasspath();
String javaHome=System.getenv("JAVA_HOME");
// Skip (not fail) when the environment cannot run a JVM child process.
if (javaHome == null) {
LOG.fatal("JAVA_HOME not defined. Test not running.");
return;
}
String[] args={"--classpath",classpath,"--queue","default","--cmd",javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName()+ " success"};
LOG.info("Initializing Launcher");
// Anonymous subclass: assert the RM reports the attempt as LAUNCHED before
// delegating to the real launch.
UnmanagedAMLauncher launcher=new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig())){
public void launchAM( ApplicationAttemptId attemptId) throws IOException, YarnException {
YarnApplicationAttemptState attemptState=rmClient.getApplicationAttemptReport(attemptId).getYarnApplicationAttemptState();
Assert.assertTrue(attemptState.equals(YarnApplicationAttemptState.LAUNCHED));
super.launchAM(attemptId);
}
}
;
boolean initSuccess=launcher.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running Launcher");
boolean result=launcher.run();
LOG.info("Launcher run completed. Result=" + result);
Assert.assertTrue(result);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testUMALauncherError() throws Exception {
  // Ask the unmanaged AM to exit with "failure": init must still succeed,
  // but run() is expected to surface the failed launch as a RuntimeException.
  String classpath=getTestRuntimeClasspath();
  String javaHome=System.getenv("JAVA_HOME");
  if (javaHome == null) {
    LOG.fatal("JAVA_HOME not defined. Test not running.");
    return;
  }
  String[] launcherArgs={"--classpath",classpath,"--queue","default","--cmd",javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName()+ " failure"};
  LOG.info("Initializing Launcher");
  UnmanagedAMLauncher amLauncher=new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig()));
  Assert.assertTrue(amLauncher.init(launcherArgs));
  LOG.info("Running Launcher");
  try {
    amLauncher.run();
    fail("Expected an exception to occur as launch should have failed");
  }
  catch ( RuntimeException e) {
    // Expected: the AM reported failure, so the launch blew up.
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetClusterNodesOnHA() throws Exception {
  // Node reports fetched through the HA client must match the fake data.
  List reports=client.getNodeReports(NodeState.RUNNING);
  // assertNotNull/assertFalse give clearer failure messages than the
  // original assertTrue(reports != null && !reports.isEmpty()).
  Assert.assertNotNull(reports);
  Assert.assertFalse(reports.isEmpty());
  Assert.assertEquals(cluster.createFakeNodeReports(),reports);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetApplicationAttemptsOnHA() throws Exception {
  // Attempt reports fetched through the HA client must match the fake data.
  List reports=client.getApplicationAttempts(cluster.createFakeAppId());
  // Split null/empty checks for clearer failure diagnostics.
  Assert.assertNotNull(reports);
  Assert.assertFalse(reports.isEmpty());
  Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),reports);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetContainersOnHA() throws Exception {
  // Container reports fetched through the HA client must match the fake data.
  List reports=client.getContainers(cluster.createFakeApplicationAttemptId());
  // Split null/empty checks for clearer failure diagnostics.
  Assert.assertNotNull(reports);
  Assert.assertFalse(reports.isEmpty());
  Assert.assertEquals(cluster.createFakeContainerReports(),reports);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetNewApplicationOnHA() throws Exception {
  // A new application id obtained through the HA client must match the fake id.
  ApplicationId appId=client.createApplication().getApplicationSubmissionContext().getApplicationId();
  // assertNotNull is the idiomatic form of assertTrue(appId != null).
  Assert.assertNotNull(appId);
  Assert.assertEquals(cluster.createFakeAppId(),appId);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetQueueInfoOnHA() throws Exception {
  // Queue info fetched through the HA client must match the fake data.
  QueueInfo queueInfo=client.getQueueInfo("root");
  // assertNotNull is the idiomatic form of assertTrue(queueInfo != null).
  Assert.assertNotNull(queueInfo);
  Assert.assertEquals(cluster.createFakeQueueInfo(),queueInfo);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetApplicationAttemptReportOnHA() throws Exception {
  // Attempt report fetched through the HA client must match the fake data.
  ApplicationAttemptReport report=client.getApplicationAttemptReport(cluster.createFakeApplicationAttemptId());
  // assertNotNull is the idiomatic form of assertTrue(report != null).
  Assert.assertNotNull(report);
  Assert.assertEquals(cluster.createFakeApplicationAttemptReport(),report);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetApplicationReportOnHA() throws Exception {
  // App report fetched through the HA client must match the fake data.
  ApplicationReport report=client.getApplicationReport(cluster.createFakeAppId());
  // assertNotNull is the idiomatic form of assertTrue(report != null).
  Assert.assertNotNull(report);
  Assert.assertEquals(cluster.createFakeAppReport(),report);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetContainerReportOnHA() throws Exception {
  // Container report fetched through the HA client must match the fake data.
  ContainerReport report=client.getContainerReport(cluster.createFakeContainerId());
  // assertNotNull is the idiomatic form of assertTrue(report != null).
  Assert.assertNotNull(report);
  Assert.assertEquals(cluster.createFakeContainerReport(),report);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetQueueUserAclsOnHA() throws Exception {
  // Queue ACLs fetched through the HA client must match the fake data.
  List queueUserAclsList=client.getQueueAclsInfo();
  // Split null/empty checks for clearer failure diagnostics.
  Assert.assertNotNull(queueUserAclsList);
  Assert.assertFalse(queueUserAclsList.isEmpty());
  Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),queueUserAclsList);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetApplicationsOnHA() throws Exception {
  // App reports fetched through the HA client must match the fake data.
  List reports=client.getApplications();
  // Split null/empty checks for clearer failure diagnostics.
  Assert.assertNotNull(reports);
  Assert.assertFalse(reports.isEmpty());
  Assert.assertEquals(cluster.createFakeAppReports(),reports);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=15000) public void testSubmitApplicationOnHA() throws Exception {
  // Build a minimal submission context (fake id, empty AM container spec,
  // tiny resource ask) and verify the active RM accepts the submission.
  ApplicationSubmissionContext submissionContext=Records.newRecord(ApplicationSubmissionContext.class);
  submissionContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amSpec=Records.newRecord(ContainerLaunchContext.class);
  submissionContext.setAMContainerSpec(amSpec);
  Resource ask=Records.newRecord(Resource.class);
  ask.setMemory(10);
  ask.setVirtualCores(1);
  submissionContext.setResource(ask);
  ApplicationId submittedId=client.submitApplication(submissionContext);
  // The app must be tracked by the currently active RM.
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps().containsKey(submittedId));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetClusterMetricsOnHA() throws Exception {
  // Cluster metrics fetched through the HA client must match the fake data.
  YarnClusterMetrics clusterMetrics=client.getYarnClusterMetrics();
  // assertNotNull is the idiomatic form of assertTrue(clusterMetrics != null).
  Assert.assertNotNull(clusterMetrics);
  Assert.assertEquals(cluster.createFakeYarnClusterMetrics(),clusterMetrics);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetRMDelegationTokenService(){
  // Non-HA: the delegation token service must name the single default RM
  // address exactly once.
  String defaultRMAddress=YarnConfiguration.DEFAULT_RM_ADDRESS;
  YarnConfiguration conf=new YarnConfiguration();
  Text tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
  String[] serviceNames=tokenService.toString().split(",");
  assertEquals(1,serviceNames.length);
  for ( String name : serviceNames) {
    assertTrue("Incorrect token service name",name.contains(defaultRMAddress));
  }
  // HA with two RM ids: the comma-separated service string must carry one
  // entry per configured RM, each built from the default RM address.
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
  conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2");
  conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm1"),"0.0.0.0");
  conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm2"),"0.0.0.0");
  tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
  serviceNames=tokenService.toString().split(",");
  assertEquals(2,serviceNames.length);
  for ( String name : serviceNames) {
    assertTrue("Incorrect token service name",name.contains(defaultRMAddress));
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=500) public void testException() throws Exception {
  // Capture stderr so the CLI's error reporting can be inspected.
  PrintStream originalErr=System.err;
  ByteArrayOutputStream errCapture=new ByteArrayOutputStream();
  System.setErr(new PrintStream(errCapture));
  try {
    // Make the mocked admin protocol fail; the CLI must return -1 and
    // print the failure message on stderr.
    when(admin.refreshQueues(any(RefreshQueuesRequest.class))).thenThrow(new IOException("test exception"));
    assertEquals(-1,rmAdminCLI.run(new String[]{"-refreshQueues"}));
    verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
    assertTrue(errCapture.toString().contains("refreshQueues: test exception"));
  }
  finally {
    System.setErr(originalErr);
  }
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test printing of help messages: the generic -help output, per-command
 * usage lines (via testError), rejection of unknown commands, and the
 * extended usage printed when HA is enabled.
 */
@Test(timeout=500) public void testHelp() throws Exception {
// Capture stdout/stderr so the printed help text can be asserted on.
PrintStream oldOutPrintStream=System.out;
PrintStream oldErrPrintStream=System.err;
ByteArrayOutputStream dataOut=new ByteArrayOutputStream();
ByteArrayOutputStream dataErr=new ByteArrayOutputStream();
System.setOut(new PrintStream(dataOut));
System.setErr(new PrintStream(dataErr));
try {
String[] args={"-help"};
assertEquals(0,rmAdminCLI.run(args));
oldOutPrintStream.println(dataOut);
// The generic help must describe every supported subcommand.
assertTrue(dataOut.toString().contains("rmadmin is the command to execute YARN administrative commands."));
assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]]"));
assertTrue(dataOut.toString().contains("-refreshQueues: Reload the queues' acls, states and scheduler " + "specific properties."));
assertTrue(dataOut.toString().contains("-refreshNodes: Refresh the hosts information at the " + "ResourceManager."));
assertTrue(dataOut.toString().contains("-refreshUserToGroupsMappings: Refresh user-to-groups mappings"));
assertTrue(dataOut.toString().contains("-refreshSuperUserGroupsConfiguration: Refresh superuser proxy" + " groups mappings"));
assertTrue(dataOut.toString().contains("-refreshAdminAcls: Refresh acls for administration of " + "ResourceManager"));
assertTrue(dataOut.toString().contains("-refreshServiceAcl: Reload the service-level authorization" + " policy file"));
assertTrue(dataOut.toString().contains("-help [cmd]: Displays help for the given command or all " + "commands if none"));
// Per-command help goes to stderr with exit code 0; testError checks both.
testError(new String[]{"-help","-refreshQueues"},"Usage: yarn rmadmin [-refreshQueues]",dataErr,0);
testError(new String[]{"-help","-refreshNodes"},"Usage: yarn rmadmin [-refreshNodes]",dataErr,0);
testError(new String[]{"-help","-refreshUserToGroupsMappings"},"Usage: yarn rmadmin [-refreshUserToGroupsMappings]",dataErr,0);
testError(new String[]{"-help","-refreshSuperUserGroupsConfiguration"},"Usage: yarn rmadmin [-refreshSuperUserGroupsConfiguration]",dataErr,0);
testError(new String[]{"-help","-refreshAdminAcls"},"Usage: yarn rmadmin [-refreshAdminAcls]",dataErr,0);
testError(new String[]{"-help","-refreshServiceAcl"},"Usage: yarn rmadmin [-refreshServiceAcl]",dataErr,0);
testError(new String[]{"-help","-getGroups"},"Usage: yarn rmadmin [-getGroups [username]]",dataErr,0);
testError(new String[]{"-help","-transitionToActive"},"Usage: yarn rmadmin [-transitionToActive " + " [--forceactive]]",dataErr,0);
testError(new String[]{"-help","-transitionToStandby"},"Usage: yarn rmadmin [-transitionToStandby ]",dataErr,0);
testError(new String[]{"-help","-getServiceState"},"Usage: yarn rmadmin [-getServiceState ]",dataErr,0);
testError(new String[]{"-help","-checkHealth"},"Usage: yarn rmadmin [-checkHealth ]",dataErr,0);
testError(new String[]{"-help","-failover"},"Usage: yarn rmadmin " + "[-failover [--forcefence] [--forceactive] " + " ]",dataErr,0);
testError(new String[]{"-help","-badParameter"},"Usage: yarn rmadmin",dataErr,0);
// Unknown commands must report an error and exit with -1.
testError(new String[]{"-badParameter"},"badParameter: Unknown command",dataErr,-1);
// With HA enabled the usage line also advertises the HA subcommands.
assertEquals(0,rmAdminCLIWithHAEnabled.run(args));
oldOutPrintStream.println(dataOut);
assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]] [-transitionToActive "+ " [--forceactive]] [-transitionToStandby ] [-failover"+ " [--forcefence] [--forceactive] ] "+ "[-getServiceState ] [-checkHealth ]"));
}
finally {
// Always restore the real stdout/stderr for subsequent tests.
System.setOut(oldOutPrintStream);
System.setErr(oldErrPrintStream);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Runs the web-app proxy as a standalone service against an HA RM pair and
// checks the proxy keeps answering for an app URL across an explicit RM
// failover.
@Test public void testWebAppProxyInStandAloneMode() throws YarnException, InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
WebAppProxyServer webAppProxyServer=new WebAppProxyServer();
try {
// Standalone proxy gets its own address, separate from the RMs.
conf.set(YarnConfiguration.PROXY_ADDRESS,"0.0.0.0:9099");
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
// Service lifecycle sanity: INITED after init, STARTED after start.
webAppProxyServer.init(conf);
Assert.assertEquals(STATE.INITED,webAppProxyServer.getServiceState());
webAppProxyServer.start();
Assert.assertEquals(STATE.STARTED,webAppProxyServer.getServiceState());
// Hit the proxy for a nonexistent app; response handling is checked by
// verifyResponse both before and after failover.
URL wrongUrl=new URL("http://0.0.0.0:9099/proxy/" + fakeAppId);
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
verifyResponse(proxyConn);
explicitFailover();
verifyConnections();
proxyConn.connect();
verifyResponse(proxyConn);
}
finally {
// Always stop the proxy so later tests can rebind its port.
webAppProxyServer.stop();
}
}
InternalCallVerifier BooleanVerifier
@Test public void testExplicitFailover() throws YarnException, InterruptedException, IOException {
  // Manual-failover mode: bring up the cluster, make RM0 active, then
  // fail over twice, verifying client connections after each transition.
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
  verifyConnections();
  for (int round=0; round < 2; round++) {
    explicitFailover();
    verifyConnections();
  }
}
InternalCallVerifier BooleanVerifier
// Exercises ZK-based automatic failover: the elector should activate an RM
// without manual transition, survive two failovers, and move a fenced RM
// back to STANDBY after a fatal state-store event.
@SuppressWarnings("unchecked") @Test public void testAutomaticFailover() throws YarnException, InterruptedException, IOException {
conf.set(YarnConfiguration.RM_CLUSTER_ID,"yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS,hostPort);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,2000);
cluster.init(conf);
cluster.start();
// No explicit transitionToActive here: leader election must do it.
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
failover();
verifyConnections();
failover();
verifyConnections();
// Inject a STATE_STORE_FENCED fatal event into the active RM; it should
// demote itself to standby rather than stay active with a fenced store.
ResourceManager rm=cluster.getResourceManager(cluster.getActiveRMIndex());
RMFatalEvent event=new RMFatalEvent(RMFatalEventType.STATE_STORE_FENCED,"Fake RMFatalEvent");
rm.getRMContext().getDispatcher().getEventHandler().handle(event);
// Poll (up to ~2s) for the asynchronous transition to STANDBY.
int maxWaitingAttempts=2000;
while (maxWaitingAttempts-- > 0) {
if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
break;
}
Thread.sleep(1);
}
Assert.assertFalse("RM didn't transition to Standby ",maxWaitingAttempts == 0);
verifyConnections();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the standby RM's web app issues a Refresh redirect to the active
 * RM for UI/REST-app pages, while infrastructure endpoints (conf, stacks,
 * logs, static content, cluster info, ...) are served locally (no header).
 */
@Test public void testRMWebAppRedirect() throws YarnException, InterruptedException, IOException {
  cluster=new MiniYARNCluster(TestRMFailover.class.getName(),2,0,1,1);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  String rm1Url="http://0.0.0.0:18088";
  String rm2Url="http://0.0.0.0:28088";
  // These standby pages must redirect to the active RM.
  String header=getHeader("Refresh",rm2Url);
  assertTrue(header.contains("; url=" + rm1Url));
  header=getHeader("Refresh",rm2Url + "/metrics");
  assertTrue(header.contains("; url=" + rm1Url));
  header=getHeader("Refresh",rm2Url + "/jmx");
  assertTrue(header.contains("; url=" + rm1Url));
  // These endpoints are served locally on the standby: no Refresh header.
  // (assertNull replaces the assertEquals(null, ...) anti-idiom.)
  String[] localPaths={"/cluster/cluster","/conf","/stacks","/logLevel","/static","/logs","/ws/v1/cluster/info"};
  for ( String path : localPaths) {
    assertNull("Unexpected redirect for " + path,getHeader("Refresh",rm2Url + path));
  }
  // REST application listing must redirect too.
  header=getHeader("Refresh",rm2Url + "/ws/v1/cluster/apps");
  assertTrue(header.contains("; url=" + rm1Url));
}
InternalCallVerifier BooleanVerifier
@Test public void testEmbeddedWebAppProxy() throws YarnException, InterruptedException, IOException {
  // The proxy embedded in the RM web app must keep serving an app URL
  // across an explicit failover.
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
  verifyConnections();
  // Request a nonexistent app through the embedded proxy on RM1's web port.
  URL proxyUrl=new URL("http://0.0.0.0:18088/proxy/" + fakeAppId);
  HttpURLConnection conn=(HttpURLConnection)proxyUrl.openConnection();
  conn.connect();
  verifyResponse(conn);
  // After failover the same connection pattern must still be answered.
  explicitFailover();
  verifyConnections();
  conn.connect();
  verifyResponse(conn);
}
BooleanVerifier
@Test(timeout=15000) public void testResourceTrackerOnHA() throws Exception {
  // Register a NodeManager, then heartbeat through an in-progress RM
  // failover driven by a background thread; the tracker must keep working.
  NodeId nmId=NodeId.newInstance("localhost",0);
  Resource nmResource=Resource.newInstance(2048,4);
  RegisterNodeManagerRequest registration=RegisterNodeManagerRequest.newInstance(nmId,0,nmResource,YarnVersionInfo.getVersion(),null,null);
  resourceTracker.registerNodeManager(registration);
  Assert.assertTrue(waitForNodeManagerToConnect(10000,nmId));
  failoverThread=createAndStartFailoverThread();
  NodeStatus nmStatus=NodeStatus.newInstance(NodeId.newInstance("localhost",0),0,null,null,null);
  NodeHeartbeatRequest heartbeat=NodeHeartbeatRequest.newInstance(nmStatus,null,null);
  resourceTracker.nodeHeartbeat(heartbeat);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Simple test Resource request.
 * Test hashCode, equals and compare: identical requests must be equal,
 * compare as 0 and share a hash code; requests differing in container
 * count must differ on all three.
 */
@Test public void testResourceRequest(){
  Resource resource=recordFactory.newRecordInstance(Resource.class);
  Priority priority=recordFactory.newRecordInstance(Priority.class);
  ResourceRequest original=ResourceRequest.newInstance(priority,"localhost",resource,2);
  ResourceRequest copy=ResourceRequest.newInstance(priority,"localhost",resource,2);
  assertTrue(original.equals(copy));
  assertEquals(0,original.compareTo(copy));
  // assertEquals gives a real diff on failure, unlike assertTrue(a == b).
  assertEquals(original.hashCode(),copy.hashCode());
  copy.setNumContainers(1);
  assertFalse(original.equals(copy));
  // Fixed: assertNotSame(0, original.compareTo(copy)) compared autoboxed
  // Integer references and only worked via the small-integer cache; test
  // the primitive value directly instead.
  assertTrue(original.compareTo(copy) != 0);
  assertFalse(original.hashCode() == copy.hashCode());
}
BooleanVerifier PublicFieldVerifier
// Verifies waitFor() wakes up when the callback handler (which stops the
// async client from inside its own callback) sets its notify flag after a
// container-complete allocation response.
@Test(timeout=5000) public void testCallAMRMClientAsyncStopFromCallbackHandlerWithWaitFor() throws YarnException, IOException, InterruptedException {
Configuration conf=new Configuration();
final TestCallbackHandler2 callbackHandler=new TestCallbackHandler2();
@SuppressWarnings("unchecked") AMRMClient client=mock(AMRMClientImpl.class);
// Mock allocate() to always report one completed container.
List completed=Arrays.asList(ContainerStatus.newInstance(newContainerId(0,0,0,0),ContainerState.COMPLETE,"",0));
final AllocateResponse response=createAllocateResponse(completed,new ArrayList(),null);
when(client.allocate(anyFloat())).thenReturn(response);
AMRMClientAsync asyncClient=AMRMClientAsync.createAMRMClientAsync(client,20,callbackHandler);
// Hand the async client to the handler so it can stop it from a callback.
callbackHandler.asynClient=asyncClient;
asyncClient.init(conf);
asyncClient.start();
// waitFor() blocks until this supplier observes the handler's notify flag.
Supplier checker=new Supplier(){
@Override public Boolean get(){
return callbackHandler.notify;
}
}
;
asyncClient.registerApplicationMaster("localhost",1234,null);
asyncClient.waitFor(checker);
Assert.assertTrue(checker.get());
}
BooleanVerifier
/**
 * Verifies that an AM_SHUTDOWN allocate response stops the heartbeat loop:
 * waitFor() observes the handler's reboot flag, allocate() is called exactly
 * once, no further callbacks fire, and stop() is idempotent.
 */
@Test(timeout=10000) public void testAMRMClientAsyncShutDownWithWaitFor() throws Exception {
  Configuration conf=new Configuration();
  final TestCallbackHandler callbackHandler=new TestCallbackHandler();
  @SuppressWarnings("unchecked") AMRMClient client=mock(AMRMClientImpl.class);
  // Every allocate() returns a response ordering the AM to shut down.
  final AllocateResponse shutDownResponse=createAllocateResponse(new ArrayList(),new ArrayList(),null);
  shutDownResponse.setAMCommand(AMCommand.AM_SHUTDOWN);
  when(client.allocate(anyFloat())).thenReturn(shutDownResponse);
  AMRMClientAsync asyncClient=AMRMClientAsync.createAMRMClientAsync(client,10,callbackHandler);
  asyncClient.init(conf);
  asyncClient.start();
  // waitFor() blocks until the handler records the shutdown command.
  Supplier checker=new Supplier(){
    @Override public Boolean get(){
      return callbackHandler.reboot;
    }
  }
  ;
  asyncClient.registerApplicationMaster("localhost",1234,null);
  asyncClient.waitFor(checker);
  asyncClient.stop();
  // Fixed: assertEquals reports the actual count on failure, unlike the
  // original assertTrue(callbackHandler.callbackCount == 0).
  Assert.assertEquals(0,callbackHandler.callbackCount);
  verify(client,times(1)).allocate(anyFloat());
  // Second stop() must be a harmless no-op (idempotence check).
  asyncClient.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Drives the async NM client through a scripted success phase followed by a
// failure phase, swapping the underlying mock NMClient between phases, and
// asserts no errors were recorded by either the callback handler or the
// event processors before checking clean shutdown.
@Test(timeout=10000) public void testNMClientAsync() throws Exception {
Configuration conf=new Configuration();
conf.setInt(YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE,10);
int expectedSuccess=40;
int expectedFailure=40;
asyncClient=new MockNMClientAsync1(expectedSuccess,expectedFailure);
asyncClient.init(conf);
// The configured pool size must be honored by init().
Assert.assertEquals("The max thread pool size is not correctly set",10,asyncClient.maxThreadPoolSize);
asyncClient.start();
for (int i=0; i < expectedSuccess + expectedFailure; ++i) {
// At the success/failure boundary, wait for all success callbacks to
// drain before swapping in the mock client that fails start/query.
if (i == expectedSuccess) {
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isAllSuccessCallsExecuted()) {
Thread.sleep(10);
}
asyncClient.setClient(mockNMClient(1));
}
Container container=mockContainer(i);
ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class);
asyncClient.startContainerAsync(container,clc);
}
// Wait for the start/query failure callbacks, then switch to the mock
// client whose stop calls fail and flip the handler onto the failure path.
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStartAndQueryFailureCallsExecuted()) {
Thread.sleep(10);
}
asyncClient.setClient(mockNMClient(2));
((TestCallbackHandler1)asyncClient.getCallbackHandler()).path=false;
for (int i=0; i < expectedFailure; ++i) {
Container container=mockContainer(expectedSuccess + expectedFailure + i);
ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class);
asyncClient.startContainerAsync(container,clc);
}
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStopFailureCallsExecuted()) {
Thread.sleep(10);
}
// Dump any recorded errors before asserting there were none.
for ( String errorMsg : ((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs) {
System.out.println(errorMsg);
}
Assert.assertEquals("Error occurs in CallbackHandler",0,((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs.size());
for ( String errorMsg : ((MockNMClientAsync1)asyncClient).errorMsgs) {
System.out.println(errorMsg);
}
Assert.assertEquals("Error occurs in ContainerEventProcessor",0,((MockNMClientAsync1)asyncClient).errorMsgs.size());
// Let in-flight containers drain before stopping, then verify shutdown
// tore down both the dispatcher thread and the thread pool.
while (asyncClient.containers.size() > 0) {
Thread.sleep(10);
}
asyncClient.stop();
Assert.assertFalse("The thread of Container Management Event Dispatcher is still alive",asyncClient.eventDispatcherThread.isAlive());
Assert.assertTrue("The thread pool is not shut down",asyncClient.threadPool.isShutdown());
}
InternalCallVerifier BooleanVerifier
/**
 * Starts a container from a helper thread while issuing the stop from the
 * test thread; the barriers (shared with MockNMClientAsync2) force the stop
 * to overtake the in-flight start, and the callback handler records whether
 * that out-of-order sequence caused an exception.
 */
@Test(timeout=10000) public void testOutOfOrder() throws Exception {
  CyclicBarrier barrierA = new CyclicBarrier(2);
  CyclicBarrier barrierB = new CyclicBarrier(2);
  CyclicBarrier barrierC = new CyclicBarrier(2);
  asyncClient = new MockNMClientAsync2(barrierA, barrierB, barrierC);
  asyncClient.init(new Configuration());
  asyncClient.start();
  final Container container = mockContainer(1);
  final ContainerLaunchContext clc =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Kick off the start on its own thread so the stop below can race it.
  Thread starter = new Thread() {
    @Override
    public void run() {
      asyncClient.startContainerAsync(container, clc);
    }
  };
  starter.start();
  // Wait until the start call is in flight before requesting the stop.
  barrierA.await();
  asyncClient.stopContainerAsync(container.getId(), container.getNodeId());
  // Wait until both callbacks have run, then check no exception was seen.
  barrierC.await();
  Assert.assertFalse("Starting and stopping should be out of order",
      ((TestCallbackHandler2) asyncClient.getCallbackHandler())
          .exceptionOccurred.get());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException {
AMRMClient amClient=null;
try {
amClient=AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Resource capability1=Resource.newInstance(1024,2);
Resource capability2=Resource.newInstance(1024,1);
Resource capability3=Resource.newInstance(1000,2);
Resource capability4=Resource.newInstance(2000,1);
Resource capability5=Resource.newInstance(1000,3);
Resource capability6=Resource.newInstance(2000,1);
Resource capability7=Resource.newInstance(2000,1);
ContainerRequest storedContainer1=new ContainerRequest(capability1,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability2,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability3,nodes,racks,priority);
ContainerRequest storedContainer4=new ContainerRequest(capability4,nodes,racks,priority);
ContainerRequest storedContainer5=new ContainerRequest(capability5,nodes,racks,priority);
ContainerRequest storedContainer6=new ContainerRequest(capability6,nodes,racks,priority);
ContainerRequest storedContainer7=new ContainerRequest(capability7,nodes,racks,priority2,false);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
amClient.addContainerRequest(storedContainer4);
amClient.addContainerRequest(storedContainer5);
amClient.addContainerRequest(storedContainer6);
amClient.addContainerRequest(storedContainer7);
List extends Collection> matches;
ContainerRequest storedRequest;
Resource testCapability1=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability1);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
Resource testCapability2=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority,node,testCapability2);
verifyMatches(matches,2);
int i=0;
for ( ContainerRequest storedRequest1 : matches.get(0)) {
if (i++ == 0) {
assertEquals(storedContainer4,storedRequest1);
}
else {
assertEquals(storedContainer6,storedRequest1);
}
}
amClient.removeContainerRequest(storedContainer6);
Resource testCapability3=Resource.newInstance(4000,4);
matches=amClient.getMatchingRequests(priority,node,testCapability3);
assert (matches.size() == 4);
Resource testCapability4=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability4);
assert (matches.size() == 2);
for ( Collection testSet : matches) {
assertEquals(1,testSet.size());
ContainerRequest testRequest=testSet.iterator().next();
assertTrue(testRequest != storedContainer4);
assertTrue(testRequest != storedContainer5);
assert (testRequest == storedContainer2 || testRequest == storedContainer3);
}
Resource testCapability5=Resource.newInstance(512,4);
matches=amClient.getMatchingRequests(priority,node,testCapability5);
assert (matches.size() == 0);
Resource testCapability7=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority2,ResourceRequest.ANY,testCapability7);
assert (matches.size() == 0);
matches=amClient.getMatchingRequests(priority2,node,testCapability7);
assert (matches.size() == 1);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies blacklist handling during allocation: a blacklisted node receives
 * no containers, removing it from the blacklist restores allocation, and a
 * pending blacklist addition survives a failed allocate() call.
 */
@Test(timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
// The Impl type is used so the test can inspect the internal ask/release
// and blacklist collections directly.
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer1);
// One container request expands to node-, rack- and ANY-level asks.
assertEquals(3,amClient.ask.size());
assertEquals(0,amClient.release.size());
List localNodeBlacklist=new ArrayList();
localNodeBlacklist.add(node);
// Blacklist the node: nothing should be allocated.
amClient.updateBlacklist(localNodeBlacklist,null);
int allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
assertEquals(0,allocatedContainerCount);
// Remove the node from the blacklist and request again; allocation resumes.
amClient.updateBlacklist(null,localNodeBlacklist);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer2);
allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
// NOTE(review): expects both outstanding requests to be satisfied — this
// depends on the cluster fixture configured outside this method.
assertEquals(2,allocatedContainerCount);
assertTrue(amClient.blacklistAdditions.isEmpty());
assertTrue(amClient.blacklistRemovals.isEmpty());
// An invalid (negative-memory) request makes allocate() throw; the pending
// blacklist addition must be retained for resend after the failure.
ContainerRequest invalidContainerRequest=new ContainerRequest(Resource.newInstance(-1024,1),nodes,racks,priority);
amClient.addContainerRequest(invalidContainerRequest);
amClient.updateBlacklist(localNodeBlacklist,null);
try {
amClient.allocate(0.1f);
fail("there should be an exception here.");
}
catch ( Exception e) {
assertEquals(1,amClient.blacklistAdditions.size());
}
}
finally {
// Always stop the client, even when the test body throws.
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies AMRMToken roll-over: after the RM's master-key rolling interval
 * elapses, heartbeats pick up a token signed with the new key, and once the
 * roll-over completes the stale token is rejected by the
 * ApplicationMasterService.
 */
@Test(timeout=60000) public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException {
AMRMClient amClient=null;
try {
AMRMTokenSecretManager amrmTokenSecretManager=yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager();
amClient=AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
Long startTime=System.currentTimeMillis();
amClient.registerApplicationMaster("Host",10000,"");
// The freshly issued token must carry the current master key id.
org.apache.hadoop.security.token.Token amrmToken_1=getAMRMToken();
Assert.assertNotNull(amrmToken_1);
Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
// Keep heartbeating until the rolling interval elapses so the RM rolls
// its master key.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
e.printStackTrace();
}
}
amClient.allocate(0.1f);
// The client should now hold a different token, signed with the new key.
org.apache.hadoop.security.token.Token amrmToken_2=getAMRMToken();
Assert.assertNotNull(amrmToken_2);
Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
Assert.assertNotEquals(amrmToken_1,amrmToken_2);
amClient.allocate(0.1f);
// Heartbeat until the RM has rolled past amrmToken_2's key as well, i.e.
// its key id matches neither the current key nor a pending next key.
while (true) {
if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) {
if (amrmTokenSecretManager.getNextMasterKeyData() == null) {
break;
}
 else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) {
break;
}
}
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
// Intentionally ignored: the surrounding loop simply retries.
}
}
// amrmToken_2 is now stale; a direct RPC authenticated with it must be
// rejected with InvalidToken.
try {
UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser");
SecurityUtil.setTokenService(amrmToken_2,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
testUser.addToken(amrmToken_2);
testUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(),conf);
}
}
).allocate(Records.newRecord(AllocateRequest.class));
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof InvalidToken);
Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId()));
}
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
// Always stop the client, even when the test body throws.
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=new AMRMClientImpl();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Resource capability=Resource.newInstance(1024,2);
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,null,priority);
amClient.addContainerRequest(storedContainer1);
List extends Collection> matches;
ContainerRequest storedRequest;
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
matches=amClient.getMatchingRequests(priority,rack,capability);
assertTrue(matches.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
@Test public void testAMRMClientMatchStorage() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Priority priority1=Records.newRecord(Priority.class);
priority1.setPriority(2);
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability,null,null,priority1);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
int containersRequestedAny=amClient.remoteRequestsTable.get(priority).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(2,containersRequestedAny);
containersRequestedAny=amClient.remoteRequestsTable.get(priority1).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(1,containersRequestedAny);
List extends Collection> matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority1,rack,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
verifyMatches(matches,1);
amClient.removeContainerRequest(storedContainer3);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
amClient.removeContainerRequest(storedContainer2);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
ContainerRequest storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
assertTrue(amClient.remoteRequestsTable.isEmpty());
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer3);
int allocatedContainerCount=0;
int iterationsLeft=3;
while (allocatedContainerCount < 2 && iterationsLeft-- > 0) {
Log.info(" == alloc " + allocatedContainerCount + " it left "+ iterationsLeft);
AllocateResponse allocResponse=amClient.allocate(0.1f);
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
assertEquals(nodeCount,amClient.getClusterNodeCount());
allocatedContainerCount+=allocResponse.getAllocatedContainers().size();
for ( Container container : allocResponse.getAllocatedContainers()) {
ContainerRequest expectedRequest=container.getPriority().equals(storedContainer1.getPriority()) ? storedContainer1 : storedContainer3;
matches=amClient.getMatchingRequests(container.getPriority(),ResourceRequest.ANY,container.getResource());
verifyMatches(matches,1);
ContainerRequest matchedRequest=matches.get(0).iterator().next();
assertEquals(matchedRequest,expectedRequest);
amClient.removeContainerRequest(matchedRequest);
amClient.releaseAssignedContainer(container.getId());
}
if (allocatedContainerCount < containersRequestedAny) {
sleep(100);
}
}
assertEquals(2,allocatedContainerCount);
AllocateResponse allocResponse=amClient.allocate(0.1f);
assertEquals(0,amClient.release.size());
assertEquals(0,amClient.ask.size());
assertEquals(0,allocResponse.getAllocatedContainers().size());
assertTrue(amClient.remoteRequestsTable.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies AMRMToken roll-over across an RM restart backed by a shared
 * MemoryRMStateStore: after rm1 rolls its master key, a second RM (rm2)
 * recovered from the same store activates the new key and rejects the
 * pre-roll-over token.
 */
@Test(timeout=30000) public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception {
conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms);
// Shared in-memory state store so rm2 can recover rm1's state.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager2 rm1=new MyResourceManager2(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
Long startTime=System.currentTimeMillis();
// Submit an app and bring up a node so an attempt can be launched.
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
// Obtain the initial AMRMToken and make the current user carry it.
AMRMTokenSecretManager amrmTokenSecretManagerForRM1=rm1.getRMContext().getAMRMTokenSecretManager();
org.apache.hadoop.security.token.Token token=amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
ugi.addTokenIdentifier(token.decodeIdentifier());
AMRMClient amClient=new MyAMRMClientImpl(rm1);
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("h1",10000,"");
amClient.allocate(0.1f);
// Heartbeat until the rolling interval elapses so rm1 rolls its key.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
// Intentionally ignored: the surrounding loop simply retries.
}
}
// The master key must now differ from the one the original token used.
Assert.assertTrue(amrmTokenSecretManagerForRM1.getMasterKey().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId());
amClient.allocate(0.1f);
org.apache.hadoop.security.token.Token newToken=amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
// Poll (bounded) until the new key is fully activated as the current key.
int waitCount=0;
while (waitCount++ <= 50) {
if (amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId()) {
break;
}
try {
amClient.allocate(0.1f);
}
catch ( Exception ex) {
break;
}
Thread.sleep(500);
}
// Activation complete: no pending next key, current key matches newToken.
Assert.assertTrue(amrmTokenSecretManagerForRM1.getNextMasterKeyData() == null);
Assert.assertTrue(amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
// Start a second RM from the same store and point the NM/client at it.
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,"0.0.0.0:9030");
final MyResourceManager2 rm2=new MyResourceManager2(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
((MyAMRMClientImpl)amClient).updateRMProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
// rm2 must have recovered the rolled-over key state.
AMRMTokenSecretManager amrmTokenSecretManagerForRM2=rm2.getRMContext().getAMRMTokenSecretManager();
Assert.assertTrue(amrmTokenSecretManagerForRM2.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
Assert.assertTrue(amrmTokenSecretManagerForRM2.getNextMasterKeyData() == null);
// The pre-roll-over token must be rejected by rm2 with InvalidToken.
try {
UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser");
SecurityUtil.setTokenService(token,rm2.getApplicationMasterService().getBindAddress());
testUser.addToken(token);
testUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,rm2.getApplicationMasterService().getBindAddress(),conf);
}
}
).allocate(Records.newRecord(AllocateRequest.class));
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof InvalidToken);
Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + token.decodeIdentifier().getApplicationAttemptId()));
}
// The client (holding the rolled-over token) still works against rm2.
amClient.allocate(0.1f);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
amClient.stop();
rm1.stop();
rm2.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With cleanup-on-stop disabled, stopping the NM client must leave its
 * started containers tracked; an explicit cleanupRunningContainers() call
 * is then required to clear them.
 */
@Test(timeout=180000) public void testNMClientNoCleanupOnStop() throws YarnException, IOException {
  rmClient.registerApplicationMaster("Host", 10000, "");
  testContainerManagement(nmClient, allocateContainers(rmClient, 5));
  rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  // Stop without cleaning up the running containers.
  stopNmClient(false);
  // The started containers are still tracked after the stop...
  assertFalse(nmClient.startedContainers.isEmpty());
  // ...until they are cleaned up explicitly.
  nmClient.cleanupRunningContainers();
  assertEquals(0, nmClient.startedContainers.size());
}
InternalCallVerifier BooleanVerifier
/**
 * With cleanup-on-stop enabled, the NM client reports the cleanup flag as
 * set and stop() is responsible for cleaning up the started containers.
 */
@Test(timeout=200000) public void testNMClient() throws YarnException, IOException {
  rmClient.registerApplicationMaster("Host", 10000, "");
  testContainerManagement(nmClient, allocateContainers(rmClient, 5));
  rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  // Some containers must still be tracked before the client is stopped.
  assertFalse(nmClient.startedContainers.isEmpty());
  nmClient.cleanupRunningContainersOnStop(true);
  boolean cleanupEnabled = nmClient.getCleanupRunningContainers().get();
  assertTrue(cleanupEnabled);
  nmClient.stop();
}
InternalCallVerifier BooleanVerifier
/**
 * hasDelegationToken should be true exactly when the URL's query string
 * contains a "delegation" parameter.
 */
@Test public void testHasDelegationTokens() throws Exception {
  TimelineAuthenticator authenticator = new TimelineAuthenticator();
  // URLs with no "delegation" query parameter carry no token.
  String[] withoutToken = {
      "http://localhost:8/resource",
      "http://localhost:8/resource?other=xxxx"};
  for (String url : withoutToken) {
    Assert.assertFalse(authenticator.hasDelegationToken(new URL(url)));
  }
  // URLs with a "delegation" query parameter do carry one.
  String[] withToken = {
      "http://localhost:8/resource?delegation=yyyy",
      "http://localhost:8/resource?other=xxxx&delegation=yyyy"};
  for (String url : withToken) {
    Assert.assertTrue(authenticator.hasDelegationToken(new URL(url)));
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * A connection failure in the underlying HTTP client should surface from
 * putEntities as a ClientHandlerException.
 */
@Test public void testPostEntitiesConnectionRefused() throws Exception {
  // Configure the mock to simulate a refused connection.
  mockClientResponse(client, null, false, true);
  boolean thrown = false;
  try {
    client.putEntities(generateEntity());
  } catch (RuntimeException re) {
    thrown = true;
    Assert.assertTrue(re instanceof ClientHandlerException);
  }
  if (!thrown) {
    Assert.fail("RuntimeException is expected");
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * A server error with no parsable response body should surface from
 * putEntities as a YarnException with a descriptive message.
 */
@Test public void testPostEntitiesNoResponse() throws Exception {
  // Simulate a 500 whose body cannot be read back.
  mockClientResponse(client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
  try {
    client.putEntities(generateEntity());
    Assert.fail("Exception is expected");
  } catch (YarnException e) {
    String message = e.getMessage();
    Assert.assertTrue(message.contains("Failed to get the response from the timeline server."));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises YarnClient#getApplications with and without application-type
 * and application-state filters against MockYarnClient's canned reports.
 */
@Test(timeout=10000) public void testGetApplications() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  List<ApplicationReport> expectedReports = ((MockYarnClient) client).getReports();
  // No filter: every canned report comes back.
  List<ApplicationReport> reports = client.getApplications();
  // FIX: JUnit's assertEquals takes (expected, actual); the arguments were
  // swapped throughout this test, which inverts its failure messages.
  Assert.assertEquals(expectedReports, reports);
  // Filter by application type only: two reports, in either order.
  Set<String> appTypes = new HashSet<String>();
  appTypes.add("YARN");
  appTypes.add("NON-YARN");
  reports = client.getApplications(appTypes, null);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports.get(1).getApplicationType().equals("NON-YARN")) || (reports.get(1).getApplicationType().equals("YARN") && reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  // Filter by application state only: two reports, in either order.
  EnumSet<YarnApplicationState> appStates = EnumSet.noneOf(YarnApplicationState.class);
  appStates.add(YarnApplicationState.FINISHED);
  appStates.add(YarnApplicationState.FAILED);
  reports = client.getApplications(null, appStates);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports.get(1).getApplicationType().equals("NON-MAPREDUCE")) || (reports.get(1).getApplicationType().equals("NON-YARN") && reports.get(0).getApplicationType().equals("NON-MAPREDUCE")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  // Filter by both type and state: only one report satisfies both.
  reports = client.getApplications(appTypes, appStates);
  Assert.assertEquals(1, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  client.stop();
}
IterativeVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Verifies YarnClient#submitApplication: a context without an ApplicationId
 * is rejected with ApplicationIdNotProvidedException, and for every terminal
 * submission state the client polls the application report a fixed number of
 * times before returning.
 */
@SuppressWarnings("deprecation") @Test(timeout=30000) public void testSubmitApplication(){
Configuration conf=new Configuration();
// Short poll interval so the submission loop finishes quickly.
conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,100);
final YarnClient client=new MockYarnClient();
client.init(conf);
client.start();
// States at which the submission poll loop is expected to terminate.
YarnApplicationState[] exitStates=new YarnApplicationState[]{YarnApplicationState.SUBMITTED,YarnApplicationState.ACCEPTED,YarnApplicationState.RUNNING,YarnApplicationState.FINISHED,YarnApplicationState.FAILED,YarnApplicationState.KILLED};
// A context with no ApplicationId must be rejected up front.
ApplicationSubmissionContext contextWithoutApplicationId=mock(ApplicationSubmissionContext.class);
try {
client.submitApplication(contextWithoutApplicationId);
Assert.fail("Should throw the ApplicationIdNotProvidedException");
}
catch ( YarnException e) {
Assert.assertTrue(e instanceof ApplicationIdNotProvidedException);
Assert.assertTrue(e.getMessage().contains("ApplicationId is not provided in ApplicationSubmissionContext"));
}
catch ( IOException e) {
Assert.fail("IOException is not expected.");
}
// Each exit state should let submitApplication return without error.
for (int i=0; i < exitStates.length; ++i) {
ApplicationSubmissionContext context=mock(ApplicationSubmissionContext.class);
ApplicationId applicationId=ApplicationId.newInstance(System.currentTimeMillis(),i);
when(context.getApplicationId()).thenReturn(applicationId);
((MockYarnClient)client).setYarnApplicationState(exitStates[i]);
try {
client.submitApplication(context);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
catch ( IOException e) {
Assert.fail("Exception is not expected.");
}
// The mock report is shared across iterations, so the cumulative call
// count grows by 4 per submission — presumably 4 state polls per
// submit; TODO confirm against MockYarnClient.
verify(((MockYarnClient)client).mockReport,times(4 * i + 4)).getYarnApplicationState();
}
client.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Running the logs CLI with no arguments must return -1 and print the usage
 * message; the expected text is rebuilt here line-by-line and compared
 * byte-for-byte with what the CLI wrote to sysOutStream.
 */
@Test(timeout=5000l) public void testHelpMessage() throws Exception {
Configuration conf=new YarnConfiguration();
YarnClient mockYarnClient=createMockYarnClient(YarnApplicationState.FINISHED);
LogsCLI dumper=new LogsCLIForTest(mockYarnClient);
dumper.setConf(conf);
// No arguments: the CLI prints its help text and signals failure.
int exitCode=dumper.run(new String[]{});
assertTrue(exitCode == -1);
// Reconstruct the expected usage output exactly as the CLI emits it.
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Retrieve logs for completed YARN applications.");
pw.println("usage: yarn logs -applicationId [OPTIONS]");
pw.println();
pw.println("general options are:");
pw.println(" -appOwner AppOwner (assumed to be current user if");
pw.println(" not specified)");
pw.println(" -containerId ContainerId (must be specified if node");
pw.println(" address is specified)");
pw.println(" -nodeAddress NodeAddress in the format nodename:port");
pw.println(" (must be specified if container id is");
pw.println(" specified)");
pw.close();
String appReportStr=baos.toString("UTF-8");
Assert.assertEquals(appReportStr,sysOutStream.toString());
}
InternalCallVerifier BooleanVerifier
/**
 * Both the logs CLI and LogCLIHelpers must return non-zero exit codes when
 * asked for logs of an application or container that does not exist.
 */
@Test(timeout=5000l) public void testFailResultCodes() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);
  LogCLIHelpers cliHelper = new LogCLIHelpers();
  cliHelper.setConf(conf);
  YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI dumper = new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);
  // A nonexistent application id yields an error code from the CLI.
  String[] args = {"-applicationId", "application_0_0"};
  int exitCode = dumper.run(args);
  assertTrue("Should return an error code", exitCode != 0);
  // A nonexistent container/node yields an error code from the helper too.
  exitCode = cliHelper.dumpAContainersLogs("application_0_0", "container_0_0",
      "nonexistentnode:1234", "nobody");
  assertTrue("Should return an error code", exitCode != 0);
}
InternalCallVerifier BooleanVerifier
/**
 * A malformed application id must make the logs CLI return -1 and print an
 * "Invalid ApplicationId" message on stderr.
 */
@Test(timeout=5000l) public void testInvalidApplicationId() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI cli = new LogsCLIForTest(mockYarnClient);
  cli.setConf(conf);
  String[] args = {"-applicationId", "not_an_app_id"};
  int exitCode = cli.run(args);
  assertTrue(exitCode == -1);
  String stderr = sysErrStream.toString();
  assertTrue(stderr.startsWith("Invalid ApplicationId specified"));
}
InternalCallVerifier BooleanVerifier
/**
 * A well-formed but unknown application id must make the logs CLI return a
 * non-zero code and report that the application state is unavailable.
 */
@Test(timeout=5000l) public void testUnknownApplicationId() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClientUnknownApp();
  LogsCLI cli = new LogsCLIForTest(mockYarnClient);
  cli.setConf(conf);
  String unknownAppId = ApplicationId.newInstance(1, 1).toString();
  int exitCode = cli.run(new String[] {"-applicationId", unknownAppId});
  assertTrue(exitCode != 0);
  String stderr = sysErrStream.toString();
  assertTrue(stderr.startsWith("Unable to get ApplicationState"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "application -help" succeeds and prints the usage text, and
 * that invocations with surplus arguments fall back to the same usage text.
 */
@Test(timeout=10000) public void testAppsHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
// Explicit -help: exit code 0 and the usage message on stdout.
int result=spyCli.run(new String[]{"application","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -kill with a trailing surplus argument: usage is printed again.
// NOTE(review): these runs use cli (not spyCli), so the verify() calls
// below still observe only the single spyCli invocation above; the
// returned result is not asserted.
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
result=cli.run(new String[]{"application","-kill",applicationId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -status with a trailing surplus argument: usage is printed again.
NodeId nodeId=NodeId.newInstance("host0",0);
result=cli.run(new String[]{"application","-status",nodeId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "container -help" succeeds and prints the usage text, and
 * that invocations with surplus arguments fall back to the same usage text.
 */
@Test(timeout=10000) public void testContainersHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
// Explicit -help: exit code 0 and the usage message on stdout.
int result=spyCli.run(new String[]{"container","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -list with a trailing surplus argument: usage is printed again.
// NOTE(review): these runs use cli (not spyCli); the result of the
// invalid invocations is not asserted.
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
result=cli.run(new String[]{"container","-list",appAttemptId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -status with a trailing surplus argument: usage is printed again.
ContainerId containerId=ContainerId.newInstance(appAttemptId,7);
result=cli.run(new String[]{"container","-status",containerId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the client throws ApplicationNotFoundException for a status query,
 * the CLI must propagate that exception with its message intact.
 */
@Test public void testGetApplicationReportException() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  String notFoundMsg = "History file for application" + applicationId + " is not found";
  when(client.getApplicationReport(any(ApplicationId.class)))
      .thenThrow(new ApplicationNotFoundException(notFoundMsg));
  try {
    cli.run(new String[] {"application", "-status", applicationId.toString()});
    Assert.fail();
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof ApplicationNotFoundException);
    Assert.assertEquals(notFoundMsg, ex.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "applicationattempt -help" succeeds and prints the usage
 * text, and that invocations with surplus arguments fall back to the same
 * usage text.
 */
@Test(timeout=10000) public void testAppAttemptsHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
// Explicit -help: exit code 0 and the usage message on stdout.
int result=spyCli.run(new String[]{"applicationattempt","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -list with a trailing surplus argument: usage is printed again.
// NOTE(review): these runs use cli (not spyCli); the result of the
// invalid invocations is not asserted.
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
// -status with a trailing surplus argument: usage is printed again.
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
result=cli.run(new String[]{"applicationattempt","-status",appAttemptId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies "application -movetoqueue": a finished application is not moved,
 * a running one is moved with success messages, and a client-side
 * ApplicationNotFoundException propagates out of the CLI.
 */
@Test public void testMoveApplicationAcrossQueues() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// Case 1: the application is already FINISHED — no move is attempted.
ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2);
int result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
assertEquals(0,result);
verify(client,times(0)).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
verify(sysOut).println("Application " + applicationId + " has already finished ");
// Case 2: a RUNNING application is moved and success is reported.
ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport);
result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
assertEquals(0,result);
verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue");
verify(sysOut).println("Successfully completed move.");
// Case 3: the client throws ApplicationNotFoundException — the CLI must
// let it propagate with the message intact.
doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).moveApplicationAcrossQueues(applicationId,"targetqueue");
cli=createAndGetAppCLI();
try {
result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof ApplicationNotFoundException);
Assert.assertEquals("Application with id '" + applicationId + "' doesn't exist in RM.",ex.getMessage());
}
}
BooleanVerifier
/**
 * The per-instance config key for an RM-specific address must carry the
 * RM-HA id suffix, while a non-RM (NM) key must be left un-suffixed.
 */
@Test public void testGetConfKeyForRMInstance(){
String rmId=HAUtil.getRMHAId(conf);
String rmKey=HAUtil.getConfKeyForRMInstance(YarnConfiguration.RM_ADDRESS,conf);
String nmKey=HAUtil.getConfKeyForRMInstance(YarnConfiguration.NM_ADDRESS,conf);
assertTrue("RM instance id is not suffixed",rmKey.contains(rmId));
assertFalse("RM instance id is suffixed",nmKey.contains(rmId));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With RM HA enabled, reading a NodeManager address must still resolve from
 * the plain (un-suffixed) NM key and honor its configured port.
 */
@Test public void testGetSocketAddressForNMWithHA(){
YarnConfiguration configuration=new YarnConfiguration();
configuration.set(YarnConfiguration.NM_ADDRESS,"0.0.0.0:1234");
configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
configuration.set(YarnConfiguration.RM_HA_ID,"rm1");
// Sanity: HA really is active for this configuration.
assertTrue(HAUtil.isHAEnabled(configuration));
InetSocketAddress nmAddress=configuration.getSocketAddr(YarnConfiguration.NM_ADDRESS,YarnConfiguration.DEFAULT_NM_ADDRESS,YarnConfiguration.DEFAULT_NM_PORT);
assertEquals(1234,nmAddress.getPort());
}
InternalCallVerifier BooleanVerifier
/**
 * updateConnectAddr must ignore the configured hostname when no bind host is
 * set, and must keep it when the bind host is the wildcard address.
 */
@Test public void testUpdateConnectAddr() throws Exception {
// Default resource-tracker address, parsed once and shared by both cases
// (InetSocketAddress is immutable, so reuse is safe).
String[] defaultParts=YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":");
InetSocketAddress serverAddress=new InetSocketAddress(defaultParts[0],Integer.valueOf(defaultParts[1]));
// Case 1: no RM_BIND_HOST — the connect address must not use "yo.yo.yo".
YarnConfiguration conf=new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"yo.yo.yo");
InetSocketAddress connectAddress=conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST,YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,serverAddress);
assertFalse(connectAddress.toString().startsWith("yo.yo.yo"));
// Case 2: wildcard RM_BIND_HOST — the configured hostname must win.
conf=new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"yo.yo.yo");
conf.set(YarnConfiguration.RM_BIND_HOST,"0.0.0.0");
connectAddress=conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST,YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,serverAddress);
assertTrue(connectAddress.toString().startsWith("yo.yo.yo"));
}
BooleanVerifier
/**
 * A ServiceException wrapping a RuntimeException must be unwrapped back to
 * the original runtime exception type with its message intact.
 */
@Test public void testRPCRuntimeExceptionUnwrapping(){
final String message="RPCRuntimeExceptionUnwrapping";
ServiceException wrapped=new ServiceException(new NullPointerException(message));
Throwable unwrapped=null;
try {
RPCUtil.unwrapAndThrowException(wrapped);
}
 catch ( Throwable thrown) {
unwrapped=thrown;
}
Assert.assertTrue(NullPointerException.class.isInstance(unwrapped));
Assert.assertTrue(unwrapped.getMessage().contains(message));
}
BooleanVerifier
/**
 * A ServiceException carrying only a message (no cause) must surface as an
 * IOException containing that message.
 */
@Test public void testRPCServiceExceptionUnwrapping(){
final String message="ServiceExceptionMessage";
ServiceException wrapped=new ServiceException(message);
Throwable unwrapped=null;
try {
RPCUtil.unwrapAndThrowException(wrapped);
}
 catch ( Throwable thrown) {
unwrapped=thrown;
}
Assert.assertTrue(IOException.class.isInstance(unwrapped));
Assert.assertTrue(unwrapped.getMessage().contains(message));
}
BooleanVerifier
/**
 * A ServiceException wrapping an IOException subtype must be unwrapped back
 * to that exact subtype (FileNotFoundException) with its message intact.
 */
@Test public void testRPCIOExceptionUnwrapping(){
final String message="DirectIOExceptionMessage";
ServiceException wrapped=new ServiceException(new FileNotFoundException(message));
Throwable unwrapped=null;
try {
RPCUtil.unwrapAndThrowException(wrapped);
}
 catch ( Throwable thrown) {
unwrapped=thrown;
}
Assert.assertTrue(FileNotFoundException.class.isInstance(unwrapped));
Assert.assertTrue(unwrapped.getMessage().contains(message));
}
TestCleaner APIUtilityVerifier BranchVerifier BooleanVerifier HybridVerifier
/**
 * Shuts down the in-process ZooKeeper server started by setUp and waits for
 * its client port to close before the next test runs.
 */
@After public void tearDown() throws IOException, InterruptedException {
if (zks != null) {
ZKDatabase zkDb=zks.getZKDatabase();
factory.shutdown();
try {
zkDb.close();
}
 catch ( IOException ignored) {
// Best-effort close: the DB may already have been closed by
// factory.shutdown(); a failure here must not mask the test result.
}
final int port=Integer.parseInt(hostPort.split(":")[1]);
Assert.assertTrue("waiting for server down",waitForServerDown("127.0.0.1:" + port,CONNECTION_TIMEOUT));
}
}
APIUtilityVerifier TestInitializer BooleanVerifier HybridVerifier
/**
 * Starts a standalone in-process ZooKeeper server on the port parsed from
 * hostPort and blocks until it accepts client connections.
 */
@Before public void setUp() throws IOException, InterruptedException {
// Keep txn log preallocation small so the test stays light on disk.
System.setProperty("zookeeper.preAllocSize","100");
FileTxnLog.setPreallocSize(100 * 1024);
if (!BASETEST.exists()) {
// Fail fast with a clear message if the base dir cannot be created,
// instead of letting createTmpDir fail later with a vaguer error.
Assert.assertTrue("could not create base dir " + BASETEST,BASETEST.mkdirs());
}
File dataDir=createTmpDir(BASETEST);
zks=new ZooKeeperServer(dataDir,dataDir,3000);
final int port=Integer.parseInt(hostPort.split(":")[1]);
if (factory == null) {
factory=new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress(port),maxCnxns);
}
factory.startup(zks);
Assert.assertTrue("waiting for server up",waitForServerUp("127.0.0.1:" + port,CONNECTION_TIMEOUT));
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that refreshLogRetentionSettings() picks up changed retention and
 * check-interval values at runtime: with the initial 1800s retention only
 * the 2000s-old app dir is deleted; after refreshing to 50s retention the
 * 50s-old dir becomes eligible too.
 */
@Test public void testRefreshLogRetentionSettings() throws IOException {
long now=System.currentTimeMillis();
long before2000Secs=now - (2000 * 1000);
long before50Secs=now - (50 * 1000);
String root="mockfs://foo/";
String remoteRootLogDir=root + "tmp/logs";
String suffix="logs";
final Configuration conf=new Configuration();
conf.setClass("fs.mockfs.impl",MockFileSystem.class,FileSystem.class);
conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED,"true");
// Initial settings: retain 1800s, check every 1s.
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,"1800");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,"1");
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,suffix);
Path rootPath=new Path(root);
FileSystem rootFs=rootPath.getFileSystem(conf);
FileSystem mockFs=((FilterFileSystem)rootFs).getRawFileSystem();
// Mocked layout: <remote>/me/logs/{application_1_1 (2000s old),
//                                  application_1_2 (50s old)}.
Path remoteRootLogPath=new Path(remoteRootLogDir);
Path userDir=new Path(remoteRootLogPath,"me");
FileStatus userDirStatus=new FileStatus(0,true,0,0,before50Secs,userDir);
when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[]{userDirStatus});
Path userLogDir=new Path(userDir,suffix);
Path app1Dir=new Path(userLogDir,"application_1_1");
FileStatus app1DirStatus=new FileStatus(0,true,0,0,before2000Secs,app1Dir);
Path app2Dir=new Path(userLogDir,"application_1_2");
FileStatus app2DirStatus=new FileStatus(0,true,0,0,before50Secs,app2Dir);
when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[]{app1DirStatus,app2DirStatus});
Path app1Log1=new Path(app1Dir,"host1");
FileStatus app1Log1Status=new FileStatus(10,false,1,1,before2000Secs,app1Log1);
when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[]{app1Log1Status});
Path app2Log1=new Path(app2Dir,"host1");
FileStatus app2Log1Status=new FileStatus(10,false,1,1,before50Secs,app2Log1);
when(mockFs.listStatus(app2Dir)).thenReturn(new FileStatus[]{app2Log1Status});
// Service overridden to re-read this test's conf when it refreshes.
AggregatedLogDeletionService deletionSvc=new AggregatedLogDeletionService(){
@Override protected Configuration createConf(){
return conf;
}
}
;
deletionSvc.init(conf);
deletionSvc.start();
// With 1800s retention only the 2000s-old dir is deleted.
verify(mockFs,timeout(10000)).delete(app1Dir,true);
verify(mockFs,timeout(3000).times(0)).delete(app2Dir,true);
// Shrink retention to 50s and the check interval to 2s, then refresh.
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,"50");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,"2");
// Uppercase L literal: the original "2000l" reads too much like "20001".
Assert.assertTrue(2000L != deletionSvc.getCheckIntervalMsecs());
deletionSvc.refreshLogRetentionSettings();
// The new 2s interval must now be in effect...
Assert.assertTrue(2000L == deletionSvc.getCheckIntervalMsecs());
// ...and the 50s-old dir becomes eligible for deletion.
verify(mockFs,timeout(10000)).delete(app2Dir,true);
deletionSvc.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Verifies that log aggregation skips files whose on-disk owner does not
 * match the expected user: the file checked against the wrong user
 * ("randomUser") is rejected with an owner-mismatch message, while the file
 * checked against the real user is aggregated normally.
 */
@Test(timeout=10000) public void testContainerLogsFileAccess() throws IOException {
// Ownership checks need native IO support.
Assume.assumeTrue(NativeIO.isAvailable());
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
File workDir=new File(testWorkDir,"testContainerLogsFileAccess1");
Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
String data="Log File content for container : ";
ApplicationId applicationId=ApplicationId.newInstance(1,1);
ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
ContainerId testContainerId1=ContainerId.newInstance(applicationAttemptId,1);
Path appDir=new Path(srcFileRoot,testContainerId1.getApplicationAttemptId().getApplicationId().toString());
Path srcFilePath1=new Path(appDir,testContainerId1.toString());
String stdout="stdout";
String stderr="stderr";
writeSrcFile(srcFilePath1,stdout,data + testContainerId1.toString() + stdout);
writeSrcFile(srcFilePath1,stderr,data + testContainerId1.toString() + stderr);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
LogKey logKey=new LogKey(testContainerId1);
String randomUser="randomUser";
LogValue logValue=spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId1,randomUser));
// First ownership lookup returns the wrong user, the second the real one,
// so exactly one of the two files fails the owner check.
when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
logWriter.append(logKey,logValue);
logWriter.close();
// Read the whole aggregated file into one string; close the reader so the
// file handle is not leaked (the original code never closed it).
StringBuilder sb=new StringBuilder();
BufferedReader in=new BufferedReader(new FileReader(new File(remoteAppLogFile.toUri().getRawPath())));
try {
String line;
while ((line=in.readLine()) != null) {
LOG.info(line);
sb.append(line);
}
}
 finally {
in.close();
}
String content=sb.toString();
String expectedOwner=ugi.getShortUserName();
if (Path.WINDOWS) {
// On Windows a member of Administrators may see the group as the owner.
final String adminsGroupString="Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner=adminsGroupString;
}
}
// Renamed from the misleading stdoutFile1/stdoutFile2: the first path is
// the *stderr* file (checked against the wrong owner), the second stdout.
String stderrPath=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stderr}));
String stderrMismatchMessage="Owner '" + expectedOwner + "' for path "+ stderrPath+ " did not match expected owner '"+ randomUser+ "'";
String stdoutPath=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stdout}));
String stdoutMismatchMessage="Owner '" + expectedOwner + "' for path "+ stdoutPath+ " did not match expected owner '"+ ugi.getShortUserName()+ "'";
// The stderr file was rejected (mismatch logged, content absent); the
// stdout file was aggregated (no mismatch, content present).
Assert.assertTrue(content.contains(stderrMismatchMessage));
Assert.assertFalse(content.contains(stdoutMismatchMessage));
Assert.assertFalse(content.contains(data + testContainerId1.toString() + stderr));
Assert.assertTrue(content.contains(data + testContainerId1.toString() + stdout));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writes one 80000-character container log through LogWriter, then reads it
 * back with LogReader and checks file permissions, framing headers, payload
 * content and total length.
 */
@Test public void testReadAcontainerLogs1() throws Exception {
Configuration conf=new Configuration();
File workDir=new File(testWorkDir,"testReadAcontainerLogs1");
Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
ContainerId testContainerId=TestContainerId.newContainerId(1,1,1,1);
Path t=new Path(srcFileRoot,testContainerId.getApplicationAttemptId().getApplicationId().toString());
Path srcFilePath=new Path(t,testContainerId.toString());
int numChars=80000;
writeSrcFile(srcFilePath,"stdout",numChars);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
LogKey logKey=new LogKey(testContainerId);
LogValue logValue=new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId,ugi.getShortUserName());
logWriter.append(logKey,logValue);
logWriter.close();
// Aggregated log files must be created with 640 permissions.
FileStatus fsStatus=fs.getFileStatus(remoteAppLogFile);
Assert.assertEquals("permissions on log aggregation file are wrong",FsPermission.createImmutable((short)0640),fsStatus.getPermission());
LogReader logReader=new LogReader(conf,remoteAppLogFile);
LogKey rLogKey=new LogKey();
DataInputStream dis=logReader.next(rLogKey);
Writer writer=new StringWriter();
LogReader.readAcontainerLogs(dis,writer);
String s=writer.toString();
// Expected total = the "LogType"/"LogLength"/"Log Contents" framing plus
// the raw payload of numChars characters.
int expectedLength="\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length() + "\nLog Contents:\n".length()+ numChars;
Assert.assertTrue("LogType not matched",s.contains("LogType:stdout"));
Assert.assertTrue("LogLength not matched",s.contains("LogLength:" + numChars));
Assert.assertTrue("Log Contents not matched",s.contains("Log Contents"));
// Rebuild the expected payload: numChars repetitions of 'filler'
// (writeSrcFile's fill character — declared outside this view).
StringBuilder sb=new StringBuilder();
for (int i=0; i < numChars; i++) {
sb.append(filler);
}
String expectedContent=sb.toString();
Assert.assertTrue("Log content incorrect",s.contains(expectedContent));
Assert.assertEquals(expectedLength,s.length());
// NOTE(review): logReader/dis are never closed; likely fine for a
// short-lived test but worth confirming.
}
APIUtilityVerifier BooleanVerifier
/**
 * Logs were written by user "owner" but are rendered as "admin" before
 * aggregation is complete: the block must report logs as not yet available.
 * @throws Exception
 */
@Test public void testBadLogs() throws Exception {
// Start from a clean aggregated-log tree.
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration=getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration,"owner");
AggregatedLogsBlockForTest aggregatedBlock=getAggregatedLogsBlockForTest(configuration,"admin","container_0_0001_01_000001");
ByteArrayOutputStream outputBuffer=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(outputBuffer);
HtmlBlock htmlBlock=new HtmlBlockForTest();
HtmlBlock.Block renderTarget=new BlockForTest(htmlBlock,writer,10,false);
aggregatedBlock.render(renderTarget);
renderTarget.getWriter().flush();
String rendered=outputBuffer.toString();
assertTrue(rendered.contains("Logs not available for entity. Aggregation may not be complete, Check back later or try the nodemanager at localhost:1234"));
}
APIUtilityVerifier BranchVerifier BooleanVerifier
/**
 * The container's log directory exists but holds no log files: the block
 * must render a "no logs available" message.
 * @throws Exception
 */
@Test public void testNoLogs() throws Exception {
// Start from a clean aggregated-log tree, then create an empty log dir.
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration=getConfiguration();
File emptyLogDir=new File("target/logs/logs/application_0_0001/container_0_0001_01_000001");
if (!emptyLogDir.exists()) {
assertTrue(emptyLogDir.mkdirs());
}
writeLog(configuration,"admin");
AggregatedLogsBlockForTest aggregatedBlock=getAggregatedLogsBlockForTest(configuration,"admin","container_0_0001_01_000001");
ByteArrayOutputStream outputBuffer=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(outputBuffer);
HtmlBlock htmlBlock=new HtmlBlockForTest();
HtmlBlock.Block renderTarget=new BlockForTest(htmlBlock,writer,10,false);
aggregatedBlock.render(renderTarget);
renderTarget.getWriter().flush();
String rendered=outputBuffer.toString();
assertTrue(rendered.contains("No logs available for container container_0_0001_01_000001"));
}
APIUtilityVerifier BooleanVerifier
/**
 * Happy path: logs exist and the viewer is authorized, so the rendered
 * block must contain all three log lines.
 * @throws Exception
 */
@Test public void testAggregatedLogsBlock() throws Exception {
// Start from a clean aggregated-log tree.
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration=getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration,"admin");
AggregatedLogsBlockForTest aggregatedBlock=getAggregatedLogsBlockForTest(configuration,"admin","container_0_0001_01_000001");
ByteArrayOutputStream outputBuffer=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(outputBuffer);
HtmlBlock htmlBlock=new HtmlBlockForTest();
HtmlBlock.Block renderTarget=new BlockForTest(htmlBlock,writer,10,false);
aggregatedBlock.render(renderTarget);
renderTarget.getWriter().flush();
String rendered=outputBuffer.toString();
assertTrue(rendered.contains("test log1"));
assertTrue(rendered.contains("test log2"));
assertTrue(rendered.contains("test log3"));
}
APIUtilityVerifier BooleanVerifier
/**
 * User "owner" tries to read logs without view permission: the block must
 * render an authorization failure instead of the log content.
 */
@Test public void testAccessDenied() throws Exception {
// Start from a clean aggregated-log tree.
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration=getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration,"owner");
AggregatedLogsBlockForTest aggregatedBlock=getAggregatedLogsBlockForTest(configuration,"owner","container_0_0001_01_000001");
ByteArrayOutputStream outputBuffer=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(outputBuffer);
HtmlBlock htmlBlock=new HtmlBlockForTest();
HtmlBlock.Block renderTarget=new BlockForTest(htmlBlock,writer,10,false);
aggregatedBlock.render(renderTarget);
renderTarget.getWriter().flush();
String rendered=outputBuffer.toString();
assertTrue(rendered.contains("User [owner] is not authorized to view the logs for entity"));
}
BooleanVerifier
/**
 * Smoke test: every NodeManager must register with the RM within 5 seconds.
 */
@Test public void testClusterWorks() throws YarnException, InterruptedException {
boolean nmsConnected=cluster.waitForNodeManagersToConnect(5000);
assertTrue("NMs fail to connect to the RM",nmsConnected);
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a two-RM MiniYARNCluster with automatic failover disabled and
 * manually transitions RM 0 to active before each test.
 */
@Before public void setup() throws IOException, InterruptedException {
Configuration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"localhost:0");
cluster=new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(),2,1,1,1);
cluster.init(conf);
cluster.start();
// Failover is manual here, so promote RM 0 ourselves.
HAServiceProtocol.StateChangeRequestInfo requestInfo=new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
cluster.getResourceManager(0).getRMContext().getRMAdminService().transitionToActive(requestInfo);
assertTrue("RM never turned active",cluster.getActiveRMIndex() != -1);
}
InternalCallVerifier BooleanVerifier
/**
 * Writes start/finish records for 100k containers of one application and
 * checks that the on-disk growth of the history store stays bounded.
 */
@Test public void testMassiveWriteContainerHistoryData() throws IOException {
LOG.info("Starting testMassiveWriteContainerHistoryData");
final long mb=1024 * 1024;
final int numContainers=100000;
// Upper bound (in MB) on acceptable on-disk growth for 100k containers.
final long maxDiskGrowthMb=20;
long usedDiskBefore=fs.getContentSummary(fsWorkingPath).getLength() / mb;
ApplicationId appId=ApplicationId.newInstance(0,1);
writeApplicationStartData(appId);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
for (int i=1; i <= numContainers; ++i) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
writeApplicationFinishData(appId);
long usedDiskAfter=fs.getContentSummary(fsWorkingPath).getLength() / mb;
Assert.assertTrue("history store grew by more than " + maxDiskGrowthMb + " MB",(usedDiskAfter - usedDiskBefore) < maxDiskGrowthMb);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Once an application's finish record has been written, every further
 * attempt or container write for it must fail with an "is not opened"
 * IOException.
 */
@Test public void testWriteAfterApplicationFinish() throws IOException {
LOG.info("Starting testWriteAfterApplicationFinish");
ApplicationId applicationId=ApplicationId.newInstance(0,1);
writeApplicationStartData(applicationId);
writeApplicationFinishData(applicationId);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1);
// Attempt start/finish after the application was closed.
try {
writeApplicationAttemptStartData(attemptId);
Assert.fail();
}
 catch ( IOException expected) {
Assert.assertTrue(expected.getMessage().contains("is not opened"));
}
try {
writeApplicationAttemptFinishData(attemptId);
Assert.fail();
}
 catch ( IOException expected) {
Assert.assertTrue(expected.getMessage().contains("is not opened"));
}
// Container start/finish after the application was closed.
ContainerId containerId=ContainerId.newInstance(attemptId,1);
try {
writeContainerStartData(containerId);
Assert.fail();
}
 catch ( IOException expected) {
Assert.assertTrue(expected.getMessage().contains("is not opened"));
}
try {
writeContainerFinishData(containerId);
Assert.fail();
}
 catch ( IOException expected) {
Assert.assertTrue(expected.getMessage().contains("is not opened"));
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips application-attempt history through the store and checks the
 * store's lifecycle rules: finish-before-start is rejected, attempts can be
 * read back after writing, and writes after the app finishes are rejected
 * as duplicates.
 */
@Test public void testReadWriteApplicationAttemptHistory() throws Exception {
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
// Finish data without prior start data must be rejected.
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
int numAppAttempts=5;
writeApplicationStartData(appId);
for (int i=1; i <= numAppAttempts; ++i) {
appAttemptId=ApplicationAttemptId.newInstance(appId,i);
writeApplicationAttemptStartData(appAttemptId);
writeApplicationAttemptFinishData(appAttemptId);
}
// All attempts must be retrievable and carry the values the write helpers
// stored (the helpers use the attempt id string as host/diagnostics).
Assert.assertEquals(numAppAttempts,store.getApplicationAttempts(appId).size());
for (int i=1; i <= numAppAttempts; ++i) {
appAttemptId=ApplicationAttemptId.newInstance(appId,i);
ApplicationAttemptHistoryData data=store.getApplicationAttempt(appAttemptId);
Assert.assertNotNull(data);
Assert.assertEquals(appAttemptId.toString(),data.getHost());
Assert.assertEquals(appAttemptId.toString(),data.getDiagnosticsInfo());
}
// After the application is finished, re-writing an existing attempt must
// be rejected as already stored.
writeApplicationFinishData(appId);
appAttemptId=ApplicationAttemptId.newInstance(appId,1);
try {
writeApplicationAttemptStartData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips application history through the store and checks lifecycle
 * rules: finish-before-start is rejected, written apps can be read back,
 * and duplicate writes are rejected.
 */
@Test public void testReadWriteApplicationHistory() throws Exception {
ApplicationId appId=ApplicationId.newInstance(0,1);
// Finish data without prior start data must be rejected.
try {
writeApplicationFinishData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
int numApps=5;
for (int i=1; i <= numApps; ++i) {
appId=ApplicationId.newInstance(0,i);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
}
// All apps must be retrievable and carry the values the write helpers
// stored (the helpers use the app id string as name/diagnostics).
Assert.assertEquals(numApps,store.getAllApplications().size());
for (int i=1; i <= numApps; ++i) {
appId=ApplicationId.newInstance(0,i);
ApplicationHistoryData data=store.getApplication(appId);
Assert.assertNotNull(data);
Assert.assertEquals(appId.toString(),data.getApplicationName());
Assert.assertEquals(appId.toString(),data.getDiagnosticsInfo());
}
// Re-writing an application that is already stored must be rejected.
appId=ApplicationId.newInstance(0,1);
try {
writeApplicationStartData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationFinishData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Writes start/finish records for 100k containers and checks the JVM heap
 * growth stays under 400 MB.
 * NOTE(review): asserting on heap-usage deltas depends on GC timing; the
 * generous 400 MB bound is what keeps this stable — confirm before
 * tightening it.
 */
@Test public void testMassiveWriteContainerHistory() throws IOException {
long mb=1024 * 1024;
Runtime runtime=Runtime.getRuntime();
long usedMemoryBefore=(runtime.totalMemory() - runtime.freeMemory()) / mb;
int numContainers=100000;
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
for (int i=1; i <= numContainers; ++i) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
long usedMemoryAfter=(runtime.totalMemory() - runtime.freeMemory()) / mb;
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips container history through the store and checks lifecycle
 * rules: finish-before-start is rejected, containers can be read back, the
 * first container is reported as the AM container, and writes after the
 * attempt finishes are rejected as duplicates.
 */
@Test public void testReadWriteContainerHistory() throws Exception {
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
// Finish data without prior start data must be rejected.
try {
writeContainerFinishData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
writeApplicationAttemptStartData(appAttemptId);
int numContainers=5;
for (int i=1; i <= numContainers; ++i) {
containerId=ContainerId.newInstance(appAttemptId,i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
// All containers must be retrievable and carry the values the write
// helpers stored (priority = container id, diagnostics = id string).
Assert.assertEquals(numContainers,store.getContainers(appAttemptId).size());
for (int i=1; i <= numContainers; ++i) {
containerId=ContainerId.newInstance(appAttemptId,i);
ContainerHistoryData data=store.getContainer(containerId);
Assert.assertNotNull(data);
Assert.assertEquals(Priority.newInstance(containerId.getId()),data.getPriority());
Assert.assertEquals(containerId.toString(),data.getDiagnosticsInfo());
}
// Container 1 must be reported as the attempt's AM container.
ContainerHistoryData masterContainer=store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),masterContainer.getContainerId());
// After the attempt is finished, re-writing an existing container must be
// rejected as already stored.
writeApplicationAttemptFinishData(appAttemptId);
containerId=ContainerId.newInstance(appAttemptId,1);
try {
writeContainerStartData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeContainerFinishData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Without a priority setting, the generated run command must begin with the
 * platform launcher (winutils on Windows, bash elsewhere).
 */
@Test(timeout=5000) public void testRunCommandNoPriority() throws Exception {
Configuration conf=new Configuration();
String[] command=ContainerExecutor.getRunCommand("echo","group1",conf);
String launcher=command[0];
boolean isPlatformLauncher=launcher.equals(Shell.WINUTILS) || launcher.equals("bash");
assertTrue("first command should be the run command for the platform",isPlatformLauncher);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Launches a container whose script/tokens paths don't really resolve and
 * verifies the executor reports a non-zero exit and a "No such file or
 * directory" diagnostic through both the logger and the container event.
 */
@Test public void testContainerLaunchError() throws IOException, InterruptedException {
Path localDir=new Path(BASE_TMP_PATH,"localDir");
// Raw List/HashMap replaced with generic types; behavior is unchanged.
List<String> localDirs=new ArrayList<String>();
localDirs.add(localDir.toString());
List<String> logDirs=new ArrayList<String>();
Path logDir=new Path(BASE_TMP_PATH,"logDir");
logDirs.add(logDir.toString());
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir.toString());
conf.set(YarnConfiguration.NM_LOG_DIRS,logDir.toString());
FileContext lfs=FileContext.getLocalFSFileContext(conf);
DefaultContainerExecutor mockExec=spy(new DefaultContainerExecutor(lfs));
mockExec.setConf(conf);
// Any output the executor logs must carry the expected failure text.
doAnswer(new Answer<Object>(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
String diagnostics=(String)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("No such file or directory"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
String appSubmitter="nobody";
String appId="APP_ID";
String containerId="CONTAINER_ID";
Container container=mock(Container.class);
ContainerId cId=mock(ContainerId.class);
ContainerLaunchContext context=mock(ContainerLaunchContext.class);
HashMap<String, String> env=new HashMap<String, String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
try {
// The diagnostics-update event sent to the container must also carry
// the failure text.
doAnswer(new Answer<Object>(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("No such file or directory"));
return null;
}
}
).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(cId.getApplicationAttemptId()).thenReturn(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),0));
when(context.getEnvironment()).thenReturn(env);
mockExec.createUserLocalDirs(localDirs,appSubmitter);
mockExec.createUserCacheDirs(localDirs,appSubmitter);
mockExec.createAppDirs(localDirs,appSubmitter,appId);
mockExec.createAppLogDirs(appId,logDirs);
Path scriptPath=new Path("file:///bin/echo");
Path tokensPath=new Path("file:///dev/null");
Path workDir=localDir;
Path pidFile=new Path(workDir,"pid.txt");
mockExec.init();
mockExec.activateContainer(cId,pidFile);
// NOTE(review): localDirs is passed for BOTH the local-dirs and log-dirs
// arguments even though logDirs was built above — preserved as-is, but
// confirm whether logDirs was intended for the last parameter.
int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,localDirs,localDirs);
Assert.assertNotSame(0,ret);
}
 finally {
mockExec.deleteAsUser(appSubmitter,localDir);
mockExec.deleteAsUser(appSubmitter,logDir);
}
}
InternalCallVerifier BooleanVerifier
/**
 * Schedules deletion of 20 randomly-built directories — alternating between
 * NM-owned (null user) and user-owned ("dingo") — and polls until each path
 * has disappeared from the local filesystem.
 */
@Test public void testAbsDelete() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Log the seed so a failing run can be reproduced.
System.out.println("SEED: " + seed);
// presumably a List<Path> — buildDirs' signature is outside this view.
List dirs=buildDirs(r,base,20);
createDirs(new Path("."),dirs);
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
del.init(conf);
del.start();
try {
for ( Path p : dirs) {
// Even-named dirs are deleted as the NM (null user), odd as "dingo".
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,null);
}
// Poll for up to 20s total across all paths for deletion to complete.
int msecToWait=20 * 1000;
for ( Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(p));
}
}
  finally {
del.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * A DeletionService stopped while a delayed deletion task is still queued
 * must nevertheless terminate its executor.
 */
@Test public void testStopWithDelayedTasks() throws Exception {
DeletionService deletionService=new DeletionService(Mockito.mock(ContainerExecutor.class));
Configuration conf=new YarnConfiguration();
// Delay deletions by 60s so the task is guaranteed to still be pending
// when stop() is called.
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,60);
try {
deletionService.init(conf);
deletionService.start();
deletionService.delete("dingo",new Path("/does/not/exist"));
}
 finally {
deletionService.stop();
}
assertTrue(deletionService.isTerminated());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Deletes relative paths against a set of base directories: each content
 * path is scheduled once with all four base dirs, and the same relative
 * path must disappear under every base dir.
 */
@Test public void testRelativeDelete() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Log the seed so a failing run can be reproduced.
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
// The same relative content tree is replicated under every base dir.
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor());
try {
del.init(new Configuration());
del.start();
for ( Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
// Even-named paths are deleted as the NM (null user), odd as "dingo".
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// msecToWait is a shared 20s budget across ALL base-dir/content pairs,
// not a per-path timeout.
int msecToWait=20 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
  finally {
del.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies dependency handling between FileDeletionTasks: a dependent task
 * runs only after all its prerequisite tasks succeed, and is skipped when
 * any prerequisite is marked failed.
 */
@Test(timeout=60000) public void testFileDeletionTaskDependency() throws Exception {
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
del.init(conf);
del.start();
try {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Log the seed so a failing run can be reproduced.
System.out.println("SEED: " + seed);
List dirs=buildDirs(r,base,2);
createDirs(new Path("."),dirs);
// Case 1: both sub-dir tasks succeed, so the dependent task that removes
// dirs.get(0) itself must eventually run.
List subDirs=buildDirs(r,dirs.get(0),2);
FileDeletionTask dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(0),new Path[]{});
List deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
int msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(dirs.get(0)))) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(dirs.get(0)));
// Case 2: one prerequisite ("absentFile") is forced to fail, so the
// dependent task must NOT run and dirs.get(1) must survive.
subDirs=buildDirs(r,dirs.get(1),2);
subDirs.add(new Path(dirs.get(1),"absentFile"));
dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(1),new Path[]{});
deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
deletionTasks.get(2).setSuccess(false);
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(subDirs.get(0)) || lfs.util().exists(subDirs.get(1)))) {
Thread.sleep(100);
msecToWait-=100;
}
assertTrue(lfs.util().exists(dirs.get(1)));
}
  finally {
del.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * With DEBUG_NM_DELETE_DELAY_SEC set to -1, DeletionService must never
 * actually remove scheduled paths: after the wait budget expires, every
 * directory must still exist.
 */
@Test public void testNoDelete() throws Exception {
  Random rng=new Random();
  long seed=rng.nextLong();
  rng.setSeed(seed);
  System.out.println("SEED: " + seed);
  List dirs=buildDirs(rng,base,20);
  createDirs(new Path("."),dirs);
  FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
  Configuration conf=new Configuration();
  // A negative delete delay disables deletion entirely.
  conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,-1);
  exec.setConf(conf);
  DeletionService del=new DeletionService(exec);
  try {
    del.init(conf);
    del.start();
    for ( Path dir : dirs) {
      String user=(Long.parseLong(dir.getName()) % 2) == 0 ? null : "dingo";
      del.delete(user,dir,null);
    }
    // Give the service up to 20s overall; every path must survive.
    int msecToWait=20 * 1000;
    for ( Path dir : dirs) {
      while (msecToWait > 0 && lfs.util().exists(dir)) {
        Thread.sleep(100);
        msecToWait-=100;
      }
      assertTrue(lfs.util().exists(dir));
    }
  }
  finally {
    del.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies deletion-task recovery: tasks scheduled before a DeletionService
 * is stopped are persisted in the state store and completed by a new
 * DeletionService instance recovered from that store.
 */
@Test public void testRecovery() throws Exception {
// Randomized layout; print the seed so a failing run can be reproduced.
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
Configuration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
// 1s delete delay keeps tasks pending long enough to be persisted.
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,1);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
try {
del.init(conf);
del.start();
for ( Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// Stop before the delayed deletions run, then recover with a fresh
// service backed by the same state store.
del.stop();
del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
del.init(conf);
del.start();
// Poll (10s total budget) until the recovered tasks delete everything.
int msecToWait=10 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
finally {
del.close();
stateStore.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * createNonExistentDirs must create missing local dirs (and their parents)
 * with the default umask-derived permission, while leaving the permissions
 * of directories that already exist untouched.
 */
@Test public void testCreateDirectories() throws IOException {
  Configuration config=new Configuration();
  config.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
  FileContext fileContext=FileContext.getLocalFSFileContext(config);
  String dirA=new File(testDir,"dirA").getPath();
  String dirB=new File(dirA,"dirB").getPath();
  String dirC=new File(testDir,"dirC").getPath();
  // dirC pre-exists with a custom permission that must be preserved.
  Path dirCPath=new Path(dirC);
  FsPermission presetPerm=new FsPermission((short)0710);
  fileContext.mkdir(dirCPath,null,true);
  fileContext.setPermission(dirCPath,presetPerm);
  String[] localDirs={dirA,dirB,dirC};
  DirectoryCollection collection=new DirectoryCollection(localDirs,config.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
  FsPermission expectedPerm=FsPermission.getDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
  boolean created=collection.createNonExistentDirs(fileContext,expectedPerm);
  Assert.assertTrue(created);
  // Both the parent (dirA) and the nested child (dirB) were created with
  // the expected permission.
  FileStatus stat=fileContext.getFileStatus(new Path(dirA));
  Assert.assertEquals("local dir parent not created with proper permissions",expectedPerm,stat.getPermission());
  stat=fileContext.getFileStatus(new Path(dirB));
  Assert.assertEquals("local dir not created with proper permissions",expectedPerm,stat.getPermission());
  // The pre-existing dirC keeps its original permission.
  stat=fileContext.getFileStatus(dirCPath);
  Assert.assertEquals("existing local directory permissions modified",presetPerm,stat.getPermission());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * getGoodDirs must hand out a snapshot: an iterator obtained before
 * checkDirs() mutates the collection must still be usable afterwards
 * (no ConcurrentModificationException).
 */
@Test public void testConcurrentAccess() throws IOException {
  Configuration config=new Configuration();
  String[] localDirs={testFile.getPath()};
  DirectoryCollection collection=new DirectoryCollection(localDirs,config.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
  // Obtain the iterator BEFORE checkDirs() modifies the underlying state.
  ListIterator it=collection.getGoodDirs().listIterator();
  Assert.assertTrue("checkDirs did not remove test file from directory list",collection.checkDirs());
  // Advancing after the mutation must succeed.
  it.next();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Starts a long-running container ("sleep 100") on a background thread,
 * then verifies that signalContainer(TERM) kills it: the blocked launcher
 * thread must exit after the signal.
 */
@Test public void testContainerKill() throws Exception {
// Environment-gated test (e.g. requires a configured secure executor).
if (!shouldRun()) {
return;
}
final ContainerId sleepId=getNextContainerId();
// runAndBlock() does not return while the container runs, so launch it on
// a daemon thread.
Thread t=new Thread(){
public void run(){
try {
runAndBlock(sleepId,"sleep","100");
}
catch ( IOException e) {
LOG.warn("Caught exception while running sleep",e);
}
}
}
;
t.setDaemon(true);
t.start();
assertTrue(t.isAlive());
// Poll (up to 10 * 200ms) for the container process to get a pid.
String pid=null;
int count=10;
while ((pid=exec.getProcessId(sleepId)) == null && count > 0) {
LOG.info("Sleeping for 200 ms before checking for pid ");
Thread.sleep(200);
count--;
}
assertNotNull(pid);
LOG.info("Going to killing the process.");
exec.signalContainer(appSubmitter,pid,Signal.TERM);
LOG.info("sleeping for 100ms to let the sleep be killed");
Thread.sleep(100);
// The launcher thread unblocks once the container process dies.
assertFalse(t.isAlive());
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Launches a container through a mock container-executor binary that fails
 * with an error mentioning "badcommand", and verifies that:
 * (1) the executor's log output and the container's diagnostics-update event
 *     both carry the "badcommand" text, and
 * (2) launchContainer returns a non-zero exit code, and
 * (3) the exact argv passed to the executor binary matches expectations.
 */
@Test public void testContainerLaunchError() throws IOException {
// Ensure the mock executor script is runnable.
File f=new File("./src/test/resources/mock-container-executer-with-error");
if (!FileUtil.canExecute(f)) {
FileUtil.setExecutable(f,true);
}
String executorPath=f.getAbsolutePath();
Configuration conf=new Configuration();
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath);
conf.set(YarnConfiguration.NM_LOCAL_DIRS,"file:///bin/echo");
conf.set(YarnConfiguration.NM_LOG_DIRS,"file:///dev/null");
// Spy so we can intercept logOutput() and inspect the diagnostics text.
mockExec=spy(new LinuxContainerExecutor());
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
String diagnostics=(String)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("badcommand"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
mockExec.setConf(conf);
String appSubmitter="nobody";
String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue());
String appId="APP_ID";
String containerId="CONTAINER_ID";
Container container=mock(Container.class);
ContainerId cId=mock(ContainerId.class);
ContainerLaunchContext context=mock(ContainerLaunchContext.class);
HashMap env=new HashMap();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
// The failed launch should push a diagnostics-update event to the container.
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("badcommand"));
return null;
}
}
).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
Path scriptPath=new Path("file:///bin/echo");
Path tokensPath=new Path("file:///dev/null");
Path workDir=new Path("/tmp");
Path pidFile=new Path(workDir,"pid.txt");
mockExec.activateContainer(cId,pidFile);
int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs());
// NOTE(review): assertNotSame compares references; it works here only
// because small boxed Integers are cached — assertNotEquals would state
// the intent (ret != 0) directly. Confirm before changing.
Assert.assertNotSame(0,ret);
// Verify the exact argument vector recorded by the mock executor binary.
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies NodeHealthScriptRunner.shouldRun(Configuration): the runner must
 * stay disabled until a script path is configured AND the script file exists
 * with execute permission.
 *
 * Fixes: the two intermediate assertFalse checks previously carried the
 * message "Node health script should start", which was misleading on
 * failure; the FileOutputStream passed to writeXml was never closed.
 */
@Test public void testNodeHealthScriptShouldRun() throws IOException {
  // No script configured at all -> must not run.
  Assert.assertFalse("By default Health script should not have started",NodeHealthScriptRunner.shouldRun(new Configuration()));
  Configuration conf=getConfForNodeHealthScript();
  // Script path configured but the script file does not exist yet -> must not run.
  Assert.assertFalse("Node health script should not start when script file is absent",NodeHealthScriptRunner.shouldRun(conf));
  try (FileOutputStream out=new FileOutputStream(nodeHealthConfigFile)) {
    conf.writeXml(out);
  }
  conf.addResource(nodeHealthConfigFile.getName());
  // Script file present but not executable -> must not run.
  writeNodeHealthScriptFile("",false);
  Assert.assertFalse("Node health script should not start when script is not executable",NodeHealthScriptRunner.shouldRun(conf));
  // Executable script present -> should run.
  writeNodeHealthScriptFile("",true);
  Assert.assertTrue("Node health script should start",NodeHealthScriptRunner.shouldRun(conf));
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises the node health script runner through its state transitions:
 * healthy (normal script) -> unhealthy (script prints ERROR) -> healthy
 * again -> unhealthy via timeout (script sleeps past the deadline).
 * After each manual timer tick the NodeHealthStatus must mirror the
 * checker's state and report text.
 *
 * Fixes: the FileOutputStream passed to writeXml was never closed.
 */
@Test public void testNodeHealthScript() throws Exception {
RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
NodeHealthStatus healthStatus=factory.newRecordInstance(NodeHealthStatus.class);
// A line starting with ERROR marks the node unhealthy.
String errorScript="echo ERROR\n echo \"Tracker not healthy\"";
String normalScript="echo \"I am all fine\"";
// Sleeps longer than the configured script timeout to force a timeout.
String timeOutScript=Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\"" : "sleep 4\necho \"I am fine\"";
Configuration conf=getConfForNodeHealthScript();
try (FileOutputStream confStream=new FileOutputStream(nodeHealthConfigFile)) {
conf.writeXml(confStream);
}
conf.addResource(nodeHealthConfigFile.getName());
writeNodeHealthScriptFile(normalScript,true);
NodeHealthCheckerService nodeHealthChecker=new NodeHealthCheckerService();
nodeHealthChecker.init(conf);
NodeHealthScriptRunner nodeHealthScriptRunner=nodeHealthChecker.getNodeHealthScriptRunner();
// Drive the script runner manually instead of waiting on the timer.
TimerTask timerTask=nodeHealthScriptRunner.getTimerTask();
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking initial healthy condition");
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Swap in the ERROR script: healthy -> unhealthy.
writeNodeHealthScriptFile(errorScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->Unhealthy");
Assert.assertFalse("Node health status reported healthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported healthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Back to the normal script: unhealthy -> healthy.
writeNodeHealthScriptFile(normalScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking UnHealthy--->healthy");
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Timeout script: node must be reported unhealthy with the timeout message.
writeNodeHealthScriptFile(timeOutScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->timeout");
Assert.assertFalse("Node health status reported healthy even after timeout",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node script time out message not propogated",healthStatus.getHealthReport().equals(NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG + NodeHealthCheckerService.SEPARATOR + nodeHealthChecker.getDiskHandler().getDisksHealthReport()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a container to completion, confirms it left files under the NM local
 * dirs (usercache, nm_private), then restarts the NM and verifies that all
 * leftover local files are cleaned up and that the expected deletion tasks
 * were scheduled on the deletion service.
 */
@Test(timeout=2000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException {
nm=new MyNodeManager();
nm.start();
final ContainerManagementProtocol containerManager=nm.getContainerManager();
// Pre-populate the filecache so the restart has something to clean up.
createFiles(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE,100);
localResourceDir.mkdirs();
ContainerLaunchContext containerLaunchContext=Records.newRecord(ContainerLaunchContext.class);
ContainerId cId=createContainerId();
URL localResourceUri=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath())));
LocalResource localResource=LocalResource.newInstance(localResourceUri,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,-1,localResourceDir.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,localResource);
containerLaunchContext.setLocalResources(localResources);
List commands=new ArrayList();
containerLaunchContext.setCommands(commands);
NodeId nodeId=nm.getNMContext().getNodeId();
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,TestContainerManager.createContainerToken(cId,0,nodeId,destinationFile,nm.getNMContext().getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
final StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
// Start the container as the app-attempt user carrying a valid NM token.
final UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString());
NMTokenIdentifier nmIdentifier=new NMTokenIdentifier(cId.getApplicationAttemptId(),nodeId,user,123);
currentUser.addTokenIdentifier(nmIdentifier);
currentUser.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws YarnException, IOException {
nm.getContainerManager().startContainers(allRequests);
return null;
}
}
);
List containerIds=new ArrayList();
containerIds.add(cId);
GetContainerStatusesRequest request=GetContainerStatusesRequest.newInstance(containerIds);
Container container=nm.getNMContext().getContainers().get(request.getContainerIds().get(0));
// Poll (up to 20 * 500ms) for the container to finish.
final int MAX_TRIES=20;
int numTries=0;
while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertEquals(ContainerState.DONE,container.getContainerState());
// The run must have created per-user and nm-private local dirs.
Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0);
Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0);
// Simulate a node reboot: stop and start a fresh NM over the same dirs.
nm.stop();
nm=new MyNodeManager();
nm.start();
// Poll until the restarted NM has cleaned every leftover local dir.
numTries=0;
while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0) && numTries < MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertTrue("After NM reboots, all local files should be deleted",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) == 0);
// Cleanup must have gone through the deletion service with the expected
// rename-and-delete ("_DEL_") patterns and file-deletion tasks.
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_")));
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user,null,new String[]{destinationFile})));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null,ContainerLocalizer.USERCACHE + "_DEL_",new String[]{})));
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that on RESYNC the NM re-registers and reports the completed
 * container: the first registration carries no container statuses (the
 * container is injected afterwards), the re-registration and subsequent
 * heartbeats must report exactly that completed container. Assertion
 * failures inside RPC callbacks are recorded in assertionFailedInThread.
 */
@Test public void testNMSentContainerStatusOnResync() throws Exception {
final ContainerStatus testCompleteContainer=TestNodeStatusUpdater.createContainerStatus(2,ContainerState.COMPLETE);
final Container container=TestNodeStatusUpdater.getMockContainer(testCompleteContainer);
NMContainerStatus report=createNMContainerStatus(2,ContainerState.COMPLETE);
when(container.getNMContainerStatus()).thenReturn(report);
NodeManager nm=new NodeManager(){
// Distinguishes the initial registration (0) from the resync re-registration.
int registerCount=0;
@Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
return new TestNodeStatusUpdaterResync(context,dispatcher,healthChecker,metrics){
@Override protected ResourceTracker createResourceTracker(){
return new MockResourceTracker(){
@Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException {
if (registerCount == 0) {
// First registration: no containers known yet.
try {
Assert.assertEquals(0,request.getNMContainerStatuses().size());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
// Inject the completed container so the resync picks it up.
getNMContext().getContainers().put(testCompleteContainer.getContainerId(),container);
}
else {
// Re-registration after RESYNC: exactly the injected container.
List statuses=request.getNMContainerStatuses();
try {
Assert.assertEquals(1,statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
}
registerCount++;
return super.registerNodeManager(request);
}
@Override public NodeHeartbeatResponse nodeHeartbeat( NodeHeartbeatRequest request){
// Every heartbeat must carry the completed container's status,
// and the RESYNC response drives the re-registration path.
List statuses=request.getNodeStatus().getContainersStatuses();
try {
Assert.assertEquals(1,statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
return YarnServerBuilderUtils.newNodeHeartbeatResponse(1,NodeAction.RESYNC,null,null,null,null,1000L);
}
}
;
}
}
;
}
}
;
YarnConfiguration conf=createNMConfig();
nm.init(conf);
nm.start();
// Wait for the resync cycle to complete (barrier released by the updater).
try {
syncBarrier.await();
}
catch ( BrokenBarrierException e) {
}
Assert.assertFalse(assertionFailedInThread.get());
nm.stop();
}
BooleanVerifier
/**
 * New container requests must be blocked while the NM is starting or
 * resyncing; the TestNodeManager2 helper records any assertion failure in
 * assertionFailedInThread, which must remain false.
 */
@SuppressWarnings("unchecked") @Test public void testBlockNewContainerRequestsOnStartAndResync() throws IOException, InterruptedException, YarnException {
  NodeManager nodeManager=new TestNodeManager2();
  YarnConfiguration nmConf=createNMConfig();
  nodeManager.init(nmConf);
  nodeManager.start();
  ContainerId containerId=TestNodeManagerShutdown.createContainerId();
  TestNodeManagerShutdown.startContainer(nodeManager,containerId,localFS,tmpDir,processStartFile);
  // Trigger a resync; the test NM verifies request blocking internally.
  nodeManager.getNMDispatcher().getEventHandler().handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
  try {
    syncBarrier.await();
  }
  catch ( BrokenBarrierException ignored) {
  }
  Assert.assertFalse(assertionFailedInThread.get());
  nodeManager.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When resync processing throws, the NM must shut itself down.
 * TestNodeManager3 flips isNMShutdownCalled and notifies on it; this test
 * waits for the flag before asserting.
 *
 * Fixes: replaced the `== false` anti-idiom with `!`.
 */
@SuppressWarnings("unchecked") @Test(timeout=10000) public void testNMshutdownWhenResyncThrowException() throws IOException, InterruptedException, YarnException {
  NodeManager nm=new TestNodeManager3();
  YarnConfiguration conf=createNMConfig();
  nm.init(conf);
  nm.start();
  Assert.assertEquals(1,((TestNodeManager3)nm).getNMRegistrationCount());
  nm.getNMDispatcher().getEventHandler().handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
  // Wait for the shutdown hook to flip the flag; re-check the condition in
  // a loop to guard against spurious wakeups. The @Test timeout bounds this.
  synchronized (isNMShutdownCalled) {
    while (!isNMShutdownCalled.get()) {
      try {
        isNMShutdownCalled.wait();
      }
      catch ( InterruptedException e) {
        // Keep waiting; the overall test timeout prevents a hang.
      }
    }
  }
  Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
  nm.stop();
}
BooleanVerifier
/**
 * The NM recovery state-store directory must survive a normal restart but
 * must be removed when the node is stopped after being decommissioned.
 */
@Test public void testStateStoreRemovalOnDecommission() throws IOException {
  final File recoveryDir=new File(basedir,"nm-recovery");
  nm=new TestNodeManager();
  YarnConfiguration conf=createNMConfig();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR,recoveryDir.getAbsolutePath());
  // First start: the recovery dir gets created.
  nm.init(conf);
  nm.start();
  Assert.assertTrue(recoveryDir.exists());
  Assert.assertTrue(recoveryDir.isDirectory());
  // Normal stop: recovery state is kept for the next restart.
  nm.stop();
  nm=null;
  Assert.assertTrue(recoveryDir.exists());
  Assert.assertTrue(recoveryDir.isDirectory());
  // Restart over the same store, then decommission before stopping.
  nm=new TestNodeManager();
  nm.init(conf);
  nm.start();
  Assert.assertTrue(recoveryDir.exists());
  Assert.assertTrue(recoveryDir.isDirectory());
  nm.getNMContext().setDecommissioned(true);
  // Stopping a decommissioned node must purge the recovery store.
  nm.stop();
  nm=null;
  Assert.assertFalse(recoveryDir.exists());
}
BranchVerifier BooleanVerifier
/**
 * Containers still running at NM shutdown must be killed. On Windows we
 * verify the process is gone; elsewhere the container's start script logs
 * the SIGTERM it received, which must appear in processStartFile.
 *
 * Fixes: the BufferedReader was leaked when the assertion failed before
 * reader.close() was reached — now closed via try-with-resources.
 */
@Test public void testKillContainersOnShutdown() throws IOException, YarnException {
  nm=new TestNodeManager();
  nm.init(createNMConfig());
  nm.start();
  startContainer(nm,cId,localFS,tmpDir,processStartFile);
  // Wait (up to 20 * 500ms) for the container process to signal startup.
  final int MAX_TRIES=20;
  int numTries=0;
  while (!processStartFile.exists() && numTries < MAX_TRIES) {
    try {
      Thread.sleep(500);
    }
    catch ( InterruptedException ex) {
      ex.printStackTrace();
    }
    numTries++;
  }
  // Stopping the NM must kill the running container.
  nm.stop();
  if (Shell.WINDOWS) {
    Assert.assertFalse("Process is still alive!",DefaultContainerExecutor.containerIsAlive(cId.toString()));
  }
  else {
    try (BufferedReader reader=new BufferedReader(new FileReader(processStartFile))) {
      boolean foundSigTermMessage=false;
      String line;
      while ((line=reader.readLine()) != null) {
        if (line.contains("SIGTERM")) {
          foundSigTermMessage=true;
          break;
        }
      }
      Assert.assertTrue("Did not find sigterm message",foundSigTermMessage);
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the RM answers heartbeats with SHUTDOWN, the NM must mark itself
 * decommissioned and transition to STOPPED on its own.
 */
@Test public void testNodeDecommision() throws Exception {
  nm=getNodeManager(NodeAction.SHUTDOWN);
  YarnConfiguration conf=createNMConfig();
  nm.init(conf);
  Assert.assertEquals(STATE.INITED,nm.getServiceState());
  nm.start();
  // Wait (up to ~100s) for at least one heartbeat to be processed.
  int attempts=0;
  while (heartBeatID < 1 && attempts++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  Assert.assertTrue(nm.getNMContext().getDecommissioned());
  // Then wait (up to ~20s) for the NM to shut itself down.
  attempts=0;
  while (nm.getServiceState() != STATE.STOPPED && attempts++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED,nm.getServiceState());
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies the NM's retry behavior when connecting to the RM:
 * (1) with an unreachable RM the NM keeps retrying for roughly
 *     RESOURCEMANAGER_CONNECT_MAX_WAIT_MS before failing startup, and
 * (2) with an RM that comes up after rmStartIntervalMS the NM connects
 *     within that interval plus a tolerance (delta).
 */
@Test(timeout=150000) public void testNMConnectionToRM() throws Exception {
// Tolerance added on top of the expected wait windows.
final long delta=50000;
final long connectionWaitMs=5000;
final long connectionRetryIntervalMs=1000;
// How long the simulated RM waits before becoming reachable.
final long rmStartIntervalMS=2 * 1000;
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,connectionWaitMs);
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,connectionRetryIntervalMs);
NodeManagerWithCustomNodeStatusUpdater nmWithUpdater;
// Case 1: updater configured so the RM never becomes reachable (true).
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,true);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
long waitStartTime=System.currentTimeMillis();
try {
nm.start();
Assert.fail("NM should have failed to start due to RM connect failure");
}
catch ( Exception e) {
// Startup must have kept retrying for at least connectionWaitMs but not
// (much) longer than connectionWaitMs + delta.
long t=System.currentTimeMillis();
long duration=t - waitStartTime;
boolean waitTimeValid=(duration >= connectionWaitMs) && (duration < (connectionWaitMs + delta));
if (!waitTimeValid) {
throw new Exception("NM should have tried re-connecting to RM during " + "period of at least " + connectionWaitMs + " ms, but "+ "stopped retrying within "+ (connectionWaitMs + delta)+ " ms: "+ e,e);
}
}
// Case 2: updater whose RM becomes reachable after rmStartIntervalMS (false).
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,false);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
NodeStatusUpdater updater=nmWithUpdater.getUpdater();
Assert.assertNotNull("Updater not yet created ",updater);
waitStartTime=System.currentTimeMillis();
try {
nm.start();
}
catch ( Exception ex) {
LOG.error("NM should have started successfully " + "after connecting to RM.",ex);
throw ex;
}
// The connect duration must fall inside [rmStartIntervalMS, rmStartIntervalMS + delta).
long duration=System.currentTimeMillis() - waitStartTime;
MyNodeStatusUpdater4 myUpdater=(MyNodeStatusUpdater4)updater;
Assert.assertTrue("NM started before updater triggered",myUpdater.isTriggered());
Assert.assertTrue("NM should have connected to RM after " + "the start interval of " + rmStartIntervalMS + ": actual "+ duration+ " "+ myUpdater,(duration >= rmStartIntervalMS));
Assert.assertTrue("NM should have connected to RM less than " + (rmStartIntervalMS + delta) + " milliseconds of RM starting up: actual "+ duration+ " "+ myUpdater,(duration < (rmStartIntervalMS + delta)));
}
IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Starts an NM with a mock status updater on a background thread, waits for
 * it to reach STARTED, lets several heartbeats flow, and verifies exactly
 * one NM registration was recorded. Also checks that the status updater is
 * the last service in the composite (so it starts after everything else).
 */
@Test public void testNMRegistration() throws InterruptedException {
nm=new NodeManager(){
@Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
return new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
}
}
;
YarnConfiguration conf=createNMConfig();
nm.init(conf);
// The status updater must be added last so it starts after all services.
Object[] services=nm.getServices().toArray();
Object lastService=services[services.length - 1];
Assert.assertTrue("last service is NOT the node status updater",lastService instanceof NodeStatusUpdater);
// Start on a separate thread; record any startup failure for the polling
// loop below to surface.
new Thread(){
public void run(){
try {
nm.start();
}
catch ( Throwable e) {
TestNodeStatusUpdater.this.nmStartError=e;
throw new YarnRuntimeException(e);
}
}
}
.start();
System.out.println(" ----- thread already started.." + nm.getServiceState());
// Poll (up to 50 * 2s) for the NM to leave INITED, failing fast on error.
int waitCount=0;
while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) {
LOG.info("Waiting for NM to start..");
if (nmStartError != null) {
LOG.error("Error during startup. ",nmStartError);
Assert.fail(nmStartError.getCause().getMessage());
}
Thread.sleep(2000);
}
if (nm.getServiceState() != STATE.STARTED) {
Assert.fail("NodeManager failed to start");
}
// Let a few heartbeats go through (up to ~200s).
waitCount=0;
while (heartBeatID <= 3 && waitCount++ != 200) {
Thread.sleep(1000);
}
Assert.assertFalse(heartBeatID <= 3);
Assert.assertEquals("Number of registered NMs is wrong!!",1,this.registeredNodes.size());
nm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies NM startup against the configured minimum RM version: with
 * NM_RESOURCEMANAGER_MINIMUM_VERSION set to 3.0.0 and the mock RM reporting
 * version 3.0.0, the NM must still register and reach STARTED.
 * NOTE(review): despite the method name, the mocked RM version equals the
 * configured minimum rather than being below it — confirm intent.
 *
 * Fixes: the wait-loop log message said "Waiting for NM to stop.." while
 * waiting for START; assertTrue(state == STARTED) replaced with
 * assertEquals for a useful failure message.
 */
@Test public void testRMVersionLessThanMinimum() throws InterruptedException {
  final AtomicInteger numCleanups=new AtomicInteger(0);
  YarnConfiguration conf=createNMConfig();
  conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION,"3.0.0");
  nm=new NodeManager(){
    @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
      // Route heartbeats through a mock tracker reporting RM version 3.0.0.
      MyNodeStatusUpdater myNodeStatusUpdater=new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
      MyResourceTracker2 myResourceTracker2=new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction=NodeAction.NORMAL;
      myResourceTracker2.rmVersion="3.0.0";
      myNodeStatusUpdater.resourceTracker=myResourceTracker2;
      return myNodeStatusUpdater;
    }
    @Override protected ContainerManagerImpl createContainerManager( Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler){
      // Count application cleanups performed during NM shutdown.
      return new ContainerManagerImpl(context,exec,del,nodeStatusUpdater,metrics,aclsManager,dirsHandler){
        @Override public void cleanUpApplicationsOnNMShutDown(){
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  nm.init(conf);
  nm.start();
  // Wait (up to ~20s) for the NM to finish starting.
  int waitCount=0;
  while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
    LOG.info("Waiting for NM to start..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STARTED,nm.getServiceState());
  nm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A completed container must be reported as "recently stopped" for the
 * configured tracking duration (10s here) and be evicted from the cache by
 * removeVeryOldStoppedContainersFromCache() once that window has passed.
 */
@Test(timeout=90000) public void testRecentlyFinishedContainers() throws Exception {
  NodeManager nodeManager=new NodeManager();
  YarnConfiguration conf=new YarnConfiguration();
  // Track stopped containers for 10 seconds.
  conf.set(NodeStatusUpdaterImpl.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,"10000");
  nodeManager.init(conf);
  NodeStatusUpdaterImpl updater=(NodeStatusUpdaterImpl)nodeManager.getNodeStatusUpdater();
  ApplicationId appId=ApplicationId.newInstance(0,0);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,0);
  ContainerId containerId=ContainerId.newInstance(attemptId,0);
  updater.addCompletedContainer(containerId);
  Assert.assertTrue(updater.isContainerRecentlyStopped(containerId));
  // Poll once per second (at most 15 times) until the cache entry expires.
  long start=System.currentTimeMillis();
  int remaining=15;
  while (remaining-- > 0 && updater.isContainerRecentlyStopped(containerId)) {
    updater.removeVeryOldStoppedContainersFromCache();
    Thread.sleep(1000);
  }
  long finish=System.currentTimeMillis();
  Assert.assertFalse(updater.isContainerRecentlyStopped(containerId));
  // Expiry must honor the 10s tracking window (with generous upper slack).
  Assert.assertTrue((finish - start) >= 10000 && (finish - start) <= 250000);
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * With log aggregation enabled, the NM must send keep-alive requests for a
 * running application in its heartbeats, and stop sending them once the
 * application is removed from the context.
 */
@Test public void testApplicationKeepAlive() throws Exception {
MyNodeManager nm=new MyNodeManager();
try {
YarnConfiguration conf=createNMConfig();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true);
// Short expiry so keep-alives are sent during the observed heartbeats.
conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,4000l);
nm.init(conf);
nm.start();
// Let 12 heartbeats happen while the app is alive.
while (heartBeatID < 12) {
Thread.sleep(1000l);
}
MyResourceTracker3 rt=(MyResourceTracker3)nm.getNodeStatusUpdater().getRMClient();
// Remove the app; subsequent heartbeats must not add keep-alives.
rt.context.getApplications().remove(rt.appId);
Assert.assertEquals(1,rt.keepAliveRequests.size());
int numKeepAliveRequests=rt.keepAliveRequests.get(rt.appId).size();
LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]");
// 12 heartbeats with a 4s expiry yield 2 or 3 keep-alives (timing slack).
Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3);
// Let more heartbeats happen after removal; the count must not grow.
while (heartBeatID < 20) {
Thread.sleep(1000l);
}
int numKeepAliveRequests2=rt.keepAliveRequests.get(rt.appId).size();
Assert.assertEquals(numKeepAliveRequests,numKeepAliveRequests2);
}
finally {
if (nm.getServiceState() == STATE.STARTED) nm.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A SHUTDOWN heartbeat triggers an internal NM stop while an explicit
 * nm.stop() is also issued; stop must be re-entrant and application cleanup
 * must run exactly once across both stop paths.
 *
 * Fixes: the final assertEquals had its expected/actual arguments swapped
 * (was assertEquals(numCleanups.get(), 1)).
 */
@Test public void testStopReentrant() throws Exception {
  final AtomicInteger numCleanups=new AtomicInteger(0);
  nm=new NodeManager(){
    @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
      // Mock tracker answers heartbeats with SHUTDOWN to trigger the
      // internal stop path.
      MyNodeStatusUpdater myNodeStatusUpdater=new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
      MyResourceTracker2 myResourceTracker2=new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction=NodeAction.SHUTDOWN;
      myNodeStatusUpdater.resourceTracker=myResourceTracker2;
      return myNodeStatusUpdater;
    }
    @Override protected ContainerManagerImpl createContainerManager( Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler){
      // Count how many times application cleanup runs across both stops.
      return new ContainerManagerImpl(context,exec,del,nodeStatusUpdater,metrics,aclsManager,dirsHandler){
        @Override public void cleanUpApplicationsOnNMShutDown(){
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  YarnConfiguration conf=createNMConfig();
  nm.init(conf);
  nm.start();
  // Wait (up to ~100s) for the first (SHUTDOWN) heartbeat to be processed.
  int waitCount=0;
  while (heartBeatID < 1 && waitCount++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  // Explicit stop while the heartbeat-triggered stop may be in flight.
  nm.stop();
  waitCount=0;
  while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED,nm.getServiceState());
  // Cleanup must have run exactly once despite two stop paths.
  Assert.assertEquals(1,numCleanups.get());
}
InternalCallVerifier BooleanVerifier
/**
 * Stops the NM while the status updater is retrying connections to an
 * unreachable RM: containers must be cleaned up, the NM must reach its
 * stopped state, and the expected number of heartbeats must have been
 * attempted before the updater gave up.
 */
@Test(timeout=200000) public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception {
  // Fix: these values are milliseconds (they feed *_MS config keys), not
  // seconds; renamed from connectionWaitSecs to avoid confusion.
  final long connectionWaitMs = 1000;
  final long connectionRetryIntervalMs = 1000;
  YarnConfiguration conf = createNMConfig();
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, connectionWaitMs);
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, connectionRetryIntervalMs);
  conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  CyclicBarrier syncBarrier = new CyclicBarrier(2);
  nm = new MyNodeManager2(syncBarrier, conf);
  nm.init(conf);
  nm.start();
  ContainerId cId = TestNodeManagerShutdown.createContainerId();
  FileContext localFS = FileContext.getLocalFSFileContext();
  TestNodeManagerShutdown.startContainer(nm, cId, localFS, nmLocalDir, new File("start_file.txt"));
  try {
    syncBarrier.await(10000, TimeUnit.MILLISECONDS);
  } catch (Exception ignored) {
    // Best-effort synchronization with the NM's shutdown thread: a timeout
    // or broken barrier is tolerated; the assertions below still validate
    // the final outcome.
  }
  Assert.assertFalse("Containers not cleaned up when NM stopped", assertionFailedInThread.get());
  Assert.assertTrue(((MyNodeManager2) nm).isStopped);
  // With a 1000ms max wait and a 1000ms retry interval, exactly two
  // heartbeat attempts are expected before the updater gives up.
  // Fix: assertEquals instead of assertTrue(x == 2) for a clearer failure
  // message, and the message no longer claims the values are seconds.
  Assert.assertEquals("heartBeatCount based on connectionWaitMs and connectionRetryIntervalMs", 2, heartBeatID);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts an in-process LocalizerService, connects a LocalizationProtocol
 * client over YarnRPC, and verifies that a heartbeat round-trip returns the
 * expected "die" response.
 */
@Test public void testLocalizerRPC() throws Exception {
  InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
  LocalizerService server = new LocalizerService(locAddr);
  try {
    server.start();
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    LocalizationProtocol client = (LocalizationProtocol) rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
    LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId("localizer0");
    LocalizerHeartbeatResponse response = client.heartbeat(status);
    assertEquals(dieHBResponse(), response);
  } finally {
    // Always release the RPC server's port, even when the heartbeat fails.
    server.stop();
  }
  // Fix: removed a trailing no-op assertTrue(true) that asserted nothing.
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalizerHeartbeatResponse through its protobuf delimited
 * wire form and checks the deserialized copy equals the original, including
 * the first resource spec.
 */
@Test(timeout=10000) public void testLocalizerHeartbeatResponseSerDe() throws Exception {
  LocalizerHeartbeatResponse original = createLocalizerHeartbeatResponse();
  assertTrue(original instanceof LocalizerHeartbeatResponsePBImpl);
  // Serialize through the PB implementation's delimited wire format.
  DataOutputBuffer serialized = new DataOutputBuffer();
  ((LocalizerHeartbeatResponsePBImpl) original).getProto().writeDelimitedTo(serialized);
  DataInputBuffer wire = new DataInputBuffer();
  wire.reset(serialized.getData(), 0, serialized.getLength());
  LocalizerHeartbeatResponseProto parsedProto = LocalizerHeartbeatResponseProto.parseDelimitedFrom(wire);
  assertNotNull(parsedProto);
  // Rehydrate from the parsed proto and compare with the original record.
  LocalizerHeartbeatResponse roundTripped = new LocalizerHeartbeatResponsePBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals(createResource(), original.getResourceSpecs().get(0).getResource());
  assertEquals(createResource(), roundTripped.getResourceSpecs().get(0).getResource());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalizerStatus through its protobuf delimited wire form
 * and checks the deserialized copy matches the original field-for-field.
 */
@Test(timeout=10000) public void testLocalizerStatusSerDe() throws Exception {
  LocalizerStatus original = createLocalizerStatus();
  assertTrue(original instanceof LocalizerStatusPBImpl);
  // Serialize through the PB implementation's delimited wire format.
  DataOutputBuffer serialized = new DataOutputBuffer();
  ((LocalizerStatusPBImpl) original).getProto().writeDelimitedTo(serialized);
  DataInputBuffer wire = new DataInputBuffer();
  wire.reset(serialized.getData(), 0, serialized.getLength());
  LocalizerStatusProto parsedProto = LocalizerStatusProto.parseDelimitedFrom(wire);
  assertNotNull(parsedProto);
  // Rehydrate and compare both localizer id and the first resource status.
  LocalizerStatus roundTripped = new LocalizerStatusPBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals("localizer0", original.getLocalizerId());
  assertEquals("localizer0", roundTripped.getLocalizerId());
  assertEquals(createLocalResourceStatus(), original.getResourceStatus(0));
  assertEquals(createLocalResourceStatus(), roundTripped.getResourceStatus(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalResourceStatus through its protobuf delimited wire
 * form and checks the deserialized copy equals the original.
 */
@Test(timeout=10000) public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus original = createLocalResourceStatus();
  assertTrue(original instanceof LocalResourceStatusPBImpl);
  // Serialize through the PB implementation's delimited wire format.
  DataOutputBuffer serialized = new DataOutputBuffer();
  ((LocalResourceStatusPBImpl) original).getProto().writeDelimitedTo(serialized);
  DataInputBuffer wire = new DataInputBuffer();
  wire.reset(serialized.getData(), 0, serialized.getLength());
  LocalResourceStatusProto parsedProto = LocalResourceStatusProto.parseDelimitedFrom(wire);
  assertNotNull(parsedProto);
  // Rehydrate and verify both copies carry the expected resource.
  LocalResourceStatus roundTripped = new LocalResourceStatusPBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals(createResource(), original.getResource());
  assertEquals(createResource(), roundTripped.getResource());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stopping one auxiliary service must transition the whole AuxServices
 * aggregate to STOPPED and clear its service list.
 */
@Test public void testAuxUnexpectedStop() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"), ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"), ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);
  aux.start();
  // Kill an arbitrary member service and observe the aggregate's reaction.
  Service victim = aux.getServices().iterator().next();
  victim.stop();
  assertEquals("Auxiliary service stopped, but AuxService unaffected.", STOPPED, aux.getServiceState());
  assertTrue(aux.getServices().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Dispatches application and container lifecycle events through AuxServices
// and verifies what each registered LightService recorded afterwards.
@Test public void testAuxEventDispatch(){
Configuration conf=new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
// Per-service expectations; presumably read by the LightService test
// implementations — confirm against ServiceA/ServiceB.
conf.setInt("A.expected.init",1);
conf.setInt("B.expected.stop",1);
final AuxServices aux=new AuxServices();
aux.init(conf);
aux.start();
ApplicationId appId1=ApplicationId.newInstance(0,65);
// 6 bytes: one char ('A', 2 bytes) plus one int (65, 4 bytes).
ByteBuffer buf=ByteBuffer.allocate(6);
buf.putChar('A');
buf.putInt(65);
buf.flip();
AuxServicesEvent event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,"user0",appId1,"Asrv",buf);
aux.handle(event);
ApplicationId appId2=ApplicationId.newInstance(0,66);
event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,"user0",appId2,"Bsrv",null);
aux.handle(event);
Collection servs=aux.getServices();
// Every service must have recorded exactly one stopped app, id 66.
for ( AuxiliaryService serv : servs) {
ArrayList appIds=((LightService)serv).getAppIdsStopped();
assertEquals("app not properly stopped",1,appIds.size());
assertTrue("wrong app stopped",appIds.contains((Integer)66));
}
// No container events dispatched yet, so per-service container state is unset.
for ( AuxiliaryService serv : servs) {
assertNull(((LightService)serv).containerId);
assertNull(((LightService)serv).resource);
}
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId1,1);
ContainerTokenIdentifier cti=new ContainerTokenIdentifier(ContainerId.newInstance(attemptId,1),"","",Resource.newInstance(1,1),0,0,0,Priority.newInstance(0),0);
Container container=new ContainerImpl(null,null,null,null,null,null,cti);
ContainerId containerId=container.getContainerId();
Resource resource=container.getResource();
// CONTAINER_INIT: every service must record this container and resource.
event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container);
aux.handle(event);
for ( AuxiliaryService serv : servs) {
assertEquals(containerId,((LightService)serv).containerId);
assertEquals(resource,((LightService)serv).resource);
// Clear the recorded state so CONTAINER_STOP can be observed separately.
((LightService)serv).containerId=null;
((LightService)serv).resource=null;
}
// CONTAINER_STOP must repopulate the same fields on every service.
event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP,container);
aux.handle(event);
for ( AuxiliaryService serv : servs) {
assertEquals(containerId,((LightService)serv).containerId);
assertEquals(resource,((LightService)serv).resource);
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Aux service names made of [a-zA-Z0-9_] that do not start with a digit are
 * accepted at init; a name with a leading digit must be rejected with a
 * descriptive message.
 */
@Test public void testValidAuxServiceName() {
  final AuxServices validServices = new AuxServices();
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv1", "Bsrv_2"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv1"), ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv_2"), ServiceB.class, Service.class);
  try {
    validServices.init(conf);
  } catch (Exception ex) {
    Assert.fail("Should not receive the exception.");
  }
  // A name starting with a digit is invalid and must fail fast at init.
  final AuxServices invalidServices = new AuxServices();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"1Asrv1"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "1Asrv1"), ServiceA.class, Service.class);
  try {
    invalidServices.init(conf);
    Assert.fail("Should receive the exception.");
  } catch (Exception ex) {
    assertTrue(ex.getMessage().contains("The ServiceName: 1Asrv1 set in " + "yarn.nodemanager.aux-services is invalid.The valid service name " + "should only contain a-zA-Z0-9_ and can not start with numbers"));
  }
}
BooleanVerifier
/**
 * Containers allocated by a previous RM (RM_INVALID_IDENTIFIER tokens) must
 * be rejected by the NM with an InvalidContainerException, while containers
 * carrying the current RM identifier must start without error.
 */
@Test public void testContainerLaunchFromPreviousRM() throws IOException, InterruptedException, YarnException {
  containerManager.start();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ContainerId cId1 = createContainerId(0);
  ContainerId cId2 = createContainerId(0);
  containerLaunchContext.setLocalResources(new HashMap());
  // Request whose token carries an identifier from a no-longer-valid RM.
  StartContainerRequest startRequest1 = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId1, ResourceManagerConstants.RM_INVALID_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(startRequest1);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  boolean catchException = false;
  try {
    StartContainersResponse response = containerManager.startContainers(allRequests);
    if (response.getFailedRequests().containsKey(cId1)) {
      throw response.getFailedRequests().get(cId1).deSerialize();
    }
  } catch (Throwable e) {
    e.printStackTrace();
    catchException = true;
    Assert.assertTrue(e.getMessage().contains("Container " + cId1 + " rejected as it is allocated by a previous RM"));
    Assert.assertTrue(e.getClass().getName().equalsIgnoreCase(InvalidContainerException.class.getName()));
  }
  Assert.assertTrue(catchException);
  // Request whose token carries the current (dummy) RM identifier.
  StartContainerRequest startRequest2 = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId2, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List list2 = new ArrayList();
  // Fix: the request was added to 'list' instead of 'list2', which left
  // allRequests2 empty — the second half of the test submitted nothing and
  // therefore verified nothing.
  list2.add(startRequest2);
  StartContainersRequest allRequests2 = StartContainersRequest.newInstance(list2);
  containerManager.startContainers(allRequests2);
  boolean noException = true;
  try {
    containerManager.startContainers(allRequests2);
  } catch (YarnException e) {
    noException = false;
  }
  Assert.assertTrue(noException);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Launches a long-running container from a generated script, verifies the
 * launched process is alive, stops the container via the ContainerManager,
 * and checks the KILLED_BY_APPMASTER exit status and that the process died.
 */
@Test public void testContainerLaunchAndStop() throws IOException, InterruptedException, YarnException {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> "+ processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    // Relax umask so the start-file is readable, record the shell pid via
    // $$, then exec a long sleep so the container stays alive until stopped.
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Localize the script itself as the container's single resource.
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map localResources = new HashMap();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Poll (max ~20s) for the script to write its start-file.
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
  String pid;
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    pid = reader.readLine().trim();
    Assert.assertEquals(null, reader.readLine());
  } finally {
    // Fix: the reader was never closed (file-descriptor leak).
    reader.close();
  }
  // Fix: removed a duplicated, identical "Process is not alive!" assertion.
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
  List containerIds = new ArrayList();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Submits ten start-container requests where even-numbered containers carry
 * a previous-RM identifier; exactly the five odd-numbered containers must
 * start and the five even-numbered ones must fail with a "previous RM"
 * rejection message.
 */
@Test public void testMultipleContainersLaunch() throws Exception {
  containerManager.start();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  List list = new ArrayList();
  for (int i = 0; i < 10; i++) {
    ContainerId cId = createContainerId(i);
    // Even ids get the invalid identifier; odd ids get the valid dummy one.
    long identifier = ((i & 1) == 0) ? ResourceManagerConstants.RM_INVALID_IDENTIFIER : DUMMY_RM_IDENTIFIER;
    Token containerToken = createContainerToken(cId, identifier, context.getNodeId(), user, context.getContainerTokenSecretManager());
    list.add(StartContainerRequest.newInstance(containerLaunchContext, containerToken));
  }
  StartContainersResponse response = containerManager.startContainers(StartContainersRequest.newInstance(list));
  // Successes are exactly the odd-numbered containers.
  Assert.assertEquals(5, response.getSuccessfullyStartedContainers().size());
  for (ContainerId id : response.getSuccessfullyStartedContainers()) {
    Assert.assertEquals(1, id.getId() & 1);
  }
  // Failures are exactly the even-numbered containers, each rejected for
  // having been allocated by a previous RM.
  Assert.assertEquals(5, response.getFailedRequests().size());
  for (Map.Entry entry : response.getFailedRequests().entrySet()) {
    Assert.assertEquals(0, entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Container " + entry.getKey() + " rejected as it is allocated by a previous RM"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Starts ten containers where even-numbered ones use user "Fail" and
// odd-numbered ones use user "Pass", then verifies that both the
// get-status and stop operations report 5 successes (odd ids) and 5
// failures (even ids) whose messages contain "Reject this container".
// NOTE(review): the user name presumably drives accept/reject behavior in
// the mocked container-manager layer — confirm against createContainerManager.
@Test public void testMultipleContainersStopAndGetStatus() throws Exception {
containerManager.start();
List startRequest=new ArrayList();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
List containerIds=new ArrayList();
for (int i=0; i < 10; i++) {
ContainerId cId=createContainerId(i);
// Even index -> "Fail", odd index -> "Pass".
String user=null;
if ((i & 1) == 0) {
user="Fail";
}
else {
user="Pass";
}
Token containerToken=createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager());
StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
startRequest.add(request);
containerIds.add(cId);
}
StartContainersRequest requestList=StartContainersRequest.newInstance(startRequest);
containerManager.startContainers(requestList);
// Status query: only the 5 odd-id containers report a status; even ids
// come back as failed requests.
GetContainerStatusesRequest statusRequest=GetContainerStatusesRequest.newInstance(containerIds);
GetContainerStatusesResponse statusResponse=containerManager.getContainerStatuses(statusRequest);
Assert.assertEquals(5,statusResponse.getContainerStatuses().size());
for ( ContainerStatus status : statusResponse.getContainerStatuses()) {
Assert.assertEquals(1,status.getContainerId().getId() & 1);
}
Assert.assertEquals(5,statusResponse.getFailedRequests().size());
for ( Map.Entry entry : statusResponse.getFailedRequests().entrySet()) {
Assert.assertEquals(0,entry.getKey().getId() & 1);
Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
}
// Stop: the same partitioning — odd ids stop cleanly, even ids fail.
StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds);
StopContainersResponse stopResponse=containerManager.stopContainers(stopRequest);
Assert.assertEquals(5,stopResponse.getSuccessfullyStoppedContainers().size());
for ( ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) {
Assert.assertEquals(1,id.getId() & 1);
}
Assert.assertEquals(5,stopResponse.getFailedRequests().size());
for ( Map.Entry entry : stopResponse.getFailedRequests().entrySet()) {
Assert.assertEquals(0,entry.getKey().getId() & 1);
Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Localizes a single file into a container's working directory and checks
 * that the expected NM directory layout (usercache/appcache plus the
 * nmPrivate system dirs) exists and that the localized file's content is
 * intact.
 */
@Test public void testContainerSetup() throws Exception {
  containerManager.start();
  // Create the file to be localized.
  File dir = new File(tmpDir, "dir");
  dir.mkdirs();
  File file = new File(dir, "file");
  PrintWriter fileWriter = new PrintWriter(file);
  fileWriter.write("Hello World!");
  fileWriter.close();
  ContainerId cId = createContainerId(0);
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(file.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(file.lastModified());
  String destinationFile = "dest_file";
  Map localResources = new HashMap();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
  // Expected on-disk layout after localization.
  ApplicationId appId = cId.getApplicationAttemptId().getApplicationId();
  String appIDStr = ConverterUtils.toString(appId);
  String containerIDStr = ConverterUtils.toString(cId);
  File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
  File userDir = new File(userCacheDir, user);
  File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
  File appDir = new File(appCache, appIDStr);
  File containerDir = new File(appDir, containerIDStr);
  File targetFile = new File(containerDir, destinationFile);
  File sysDir = new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR);
  File appSysDir = new File(sysDir, appIDStr);
  File containerSysDir = new File(appSysDir, containerIDStr);
  for (File f : new File[]{localDir, sysDir, userCacheDir, appDir, appSysDir, containerDir, containerSysDir}) {
    Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists());
    Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!", f.isDirectory());
  }
  Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!", targetFile.exists());
  // Verify the localized content survived intact.
  BufferedReader reader = new BufferedReader(new FileReader(targetFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    Assert.assertEquals(null, reader.readLine());
  } finally {
    // Fix: the reader was never closed (file-descriptor leak).
    reader.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A start-container request that references service data for an auxiliary
 * service that is not configured must fail with a descriptive error rather
 * than start the container.
 */
@Test public void testStartContainerFailureWithUnknownAuxService() throws Exception {
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"existService"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "existService"), ServiceA.class, Service.class);
  containerManager.start();
  List startRequest = new ArrayList();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Attach service data for a service that was never configured.
  Map serviceData = new HashMap();
  String serviceName = "non_exist_auxService";
  serviceData.put(serviceName, ByteBuffer.wrap(serviceName.getBytes()));
  containerLaunchContext.setServiceData(serviceData);
  ContainerId cId = createContainerId(0);
  String user = "start_container_fail";
  Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager());
  StartContainerRequest request = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
  startRequest.add(request);
  StartContainersRequest requestList = StartContainersRequest.newInstance(startRequest);
  StartContainersResponse response = containerManager.startContainers(requestList);
  // Fix: assertEquals instead of assertTrue(x == y) so failures report
  // expected vs. actual sizes instead of a bare boolean.
  Assert.assertEquals(1, response.getFailedRequests().size());
  Assert.assertEquals(0, response.getSuccessfullyStartedContainers().size());
  Assert.assertTrue(response.getFailedRequests().containsKey(cId));
  Assert.assertTrue(response.getFailedRequests().get(cId).getMessage().contains("The auxService:" + serviceName + " does not exist"));
}
APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After startup the NM should use the canonical hostname for its node id
 * when it differs from the raw address, and querying the status of a
 * never-started container must surface an error.
 */
@Test public void testContainerManagerInitialization() throws IOException {
  containerManager.start();
  InetAddress localAddr = InetAddress.getLocalHost();
  String canonicalName = localAddr.getCanonicalHostName();
  if (!localAddr.getHostAddress().equals(canonicalName)) {
    // Verify the NM registered under the fully-qualified host name.
    Assert.assertEquals(canonicalName, context.getNodeId().getHost());
  }
  boolean threw = false;
  try {
    List containerIds = new ArrayList();
    ContainerId unknownId = createContainerId(0);
    containerIds.add(unknownId);
    GetContainerStatusesResponse response = containerManager.getContainerStatuses(GetContainerStatusesRequest.newInstance(containerIds));
    // The per-container failure is delivered in the response map; surface
    // it as a thrown exception for the assertion below.
    if (response.getFailedRequests().containsKey(unknownId)) {
      throw response.getFailedRequests().get(unknownId).deSerialize();
    }
  } catch (Throwable e) {
    threw = true;
  }
  Assert.assertTrue(threw);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Localizes a file into a container, lets the container complete, and
// verifies per-container dirs are removed while app-level dirs survive
// until the application finishes; after the app-finish event all dirs and
// the localized file must eventually disappear.
@Test public void testLocalFilesCleanup() throws InterruptedException, IOException, YarnException {
// Use a fresh DeletionService and ContainerManager for this test.
delSrvc=new DeletionService(exec);
delSrvc.init(conf);
containerManager=createContainerManager(delSrvc);
containerManager.init(conf);
containerManager.start();
// Create the file to be localized.
File dir=new File(tmpDir,"dir");
dir.mkdirs();
File file=new File(dir,"file");
PrintWriter fileWriter=new PrintWriter(file);
fileWriter.write("Hello World!");
fileWriter.close();
ContainerId cId=createContainerId(0);
ApplicationId appId=cId.getApplicationAttemptId().getApplicationId();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(FileContext.getLocalFSFileContext().makeQualified(new Path(file.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(file.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.RUNNING);
// Expected directory layout under the NM local dir.
String appIDStr=ConverterUtils.toString(appId);
String containerIDStr=ConverterUtils.toString(cId);
File userCacheDir=new File(localDir,ContainerLocalizer.USERCACHE);
File userDir=new File(userCacheDir,user);
File appCache=new File(userDir,ContainerLocalizer.APPCACHE);
File appDir=new File(appCache,appIDStr);
File containerDir=new File(appDir,containerIDStr);
File targetFile=new File(containerDir,destinationFile);
File sysDir=new File(localDir,ResourceLocalizationService.NM_PRIVATE_DIR);
File appSysDir=new File(sysDir,appIDStr);
File containerSysDir=new File(appSysDir,containerIDStr);
// While the app is still RUNNING: app-level dirs remain, but the
// container-level dirs and the localized file are already gone.
Assert.assertTrue("AppDir " + appDir.getAbsolutePath() + " doesn't exist!!",appDir.exists());
Assert.assertTrue("AppSysDir " + appSysDir.getAbsolutePath() + " doesn't exist!!",appSysDir.exists());
for ( File f : new File[]{containerDir,containerSysDir}) {
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
// Signal app completion; everything app-related should now be cleaned.
containerManager.handle(new CMgrCompletedAppsEvent(Arrays.asList(new ApplicationId[]{appId}),CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.FINISHED);
// Deletion is asynchronous; poll up to ~15s per path.
for ( File f : new File[]{appDir,containerDir,appSysDir,containerSysDir}) {
int timeout=0;
while (f.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
int timeout=0;
while (targetFile.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// NM application recovery: with NM_RECOVERY_ENABLED and a memory-backed
// state store, restarts the ContainerManager three times and checks that
// the application, its ACLs, and its lifecycle state are recovered each
// time, and that a fully finished application is not resurrected.
@Test public void testApplicationRecovery() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.NM_ADDRESS,"localhost:1234");
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,"yarn_admin_user");
// Shared across restarts: this store is what makes recovery possible.
NMStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
Context context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
ContainerManagerImpl cm=createContainerManager(context);
cm.init(conf);
cm.start();
// Seed the same master key into both secret managers.
// NOTE(review): new Integer(123).byteValue() is just (byte)123; the boxed
// Integer constructor is deprecated on newer JDKs.
MasterKey masterKey=new MasterKeyPBImpl();
masterKey.setKeyId(123);
masterKey.setBytes(ByteBuffer.wrap(new byte[]{new Integer(123).byteValue()}));
context.getContainerTokenSecretManager().setMasterKey(masterKey);
context.getNMTokenSecretManager().setMasterKey(masterKey);
String appUser="app_user1";
String modUser="modify_user1";
String viewUser="view_user1";
String enemyUser="enemy_user";
// Launch one container whose launch context carries MODIFY/VIEW ACLs.
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cid=ContainerId.newInstance(attemptId,1);
Map localResources=Collections.emptyMap();
Map containerEnv=Collections.emptyMap();
List containerCmds=Collections.emptyList();
Map serviceData=Collections.emptyMap();
Credentials containerCreds=new Credentials();
DataOutputBuffer dob=new DataOutputBuffer();
containerCreds.writeTokenStorageToStream(dob);
ByteBuffer containerTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
Map acls=new HashMap();
acls.put(ApplicationAccessType.MODIFY_APP,modUser);
acls.put(ApplicationAccessType.VIEW_APP,viewUser);
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,containerEnv,containerCmds,serviceData,containerTokens,acls);
StartContainersResponse startResponse=startContainer(context,cm,cid,clc);
assertTrue(startResponse.getFailedRequests().isEmpty());
assertEquals(1,context.getApplications().size());
Application app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
// ACL checks: modUser may modify (not enforced for view here), viewUser
// may view but not modify, enemyUser may do neither.
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Restart #1: the app and its ACLs must be recovered from the state store.
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Mark the app finished, then restart while cleanup is still in flight.
List finishedApps=new ArrayList();
finishedApps.add(appId);
cm.handle(new CMgrCompletedAppsEvent(finishedApps,CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER));
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
cm.stop();
// Restart #2: recovery must resume in the cleaning-up state.
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Complete the app's lifecycle: resources cleaned up, then log handling.
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
assertEquals(app.getApplicationState(),ApplicationState.FINISHED);
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
cm.stop();
// Restart #3: a fully finished application must NOT be recovered.
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertTrue(context.getApplications().isEmpty());
cm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An application whose containers have all already finished should skip the
 * FINISHING_CONTAINERS_WAIT state: it goes INITING -> RUNNING, and once the
 * app-finish event arrives with no live containers it moves straight to
 * APPLICATION_RESOURCES_CLEANINGUP and finally FINISHED.
 */
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnCompletedContainers(){
WrappedApplication wa=null;
try {
wa=new WrappedApplication(5,314159265358979L,"yak",3);
wa.initApplication();
wa.initContainer(-1);
assertEquals(ApplicationState.INITING,wa.app.getApplicationState());
wa.applicationInited();
assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState());
// Clear earlier localizer interactions so the verify() below matches
// only the DESTROY_APPLICATION_RESOURCES event.
reset(wa.localizerBus);
// All three containers finish while the app is still RUNNING.
wa.containerFinished(0);
wa.containerFinished(1);
wa.containerFinished(2);
assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState());
assertEquals(0,wa.app.getContainers().size());
wa.appFinished();
// No live containers remain, so cleanup starts immediately.
assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState());
verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app)));
wa.appResourcesCleanedup();
for ( Container container : wa.containers) {
ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId());
waitForContainerTokenToExpire(identifier);
// NOTE(review): expects an expired token to be accepted again as a
// valid start-container request — confirm secret-manager semantics.
Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier));
}
assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState());
}
finally {
if (wa != null) wa.finished();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An app-finish event arriving while containers are still running must first
 * park the application in FINISHING_CONTAINERS_WAIT and send a kill to each
 * live container; only after the last container finishes does it proceed to
 * resource cleanup (notifying localizer and aux services) and then FINISHED.
 */
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnRunningContainers(){
WrappedApplication wa=null;
try {
wa=new WrappedApplication(4,314159265358979L,"yak",3);
wa.initApplication();
wa.initContainer(-1);
assertEquals(ApplicationState.INITING,wa.app.getApplicationState());
wa.applicationInited();
assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState());
// One of the three containers finishes; two remain running.
wa.containerFinished(0);
assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState());
assertEquals(2,wa.app.getContainers().size());
wa.appFinished();
// Two containers still live, so the app waits and kills each of them.
assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState());
assertEquals(2,wa.app.getContainers().size());
for (int i=1; i < wa.containers.size(); i++) {
verify(wa.containerBus).handle(argThat(new ContainerKillMatcher(wa.containers.get(i).getContainerId())));
}
wa.containerFinished(1);
assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState());
assertEquals(1,wa.app.getContainers().size());
// Clear earlier localizer interactions so the verify() below matches
// only the DESTROY_APPLICATION_RESOURCES event.
reset(wa.localizerBus);
wa.containerFinished(2);
// Last container gone: resource cleanup begins.
assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState());
assertEquals(0,wa.app.getContainers().size());
verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app)));
verify(wa.auxBus).handle(refEq(new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,wa.appId)));
wa.appResourcesCleanedup();
for ( Container container : wa.containers) {
ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId());
waitForContainerTokenToExpire(identifier);
// NOTE(review): expects an expired token to be accepted again as a
// valid start-container request — confirm secret-manager semantics.
Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier));
}
assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState());
}
finally {
if (wa != null) wa.finished();
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Once localization has failed, late RESOURCE_LOCALIZED events must be
 * ignored: the container stays in LOCALIZATION_FAILED, exposes no localized
 * resources, cleanup is requested, and the diagnostics carry the original
 * localization error.
 */
@Test public void testResourceLocalizedOnLocalizationFailed() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(16,314159265358979L,4344,"yak");
wc.initContainer();
// Fail roughly half of the resources, but always at least one.
int failCount=wc.getLocalResourceCount() / 2;
if (failCount == 0) {
failCount=1;
}
wc.failLocalizeResources(failCount);
assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
// Deliver localized events from the failed state; state must not change.
wc.localizeResourcesFromInvalidState(failCount);
assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
verifyCleanupCall(wc);
Assert.assertTrue(wc.getDiagnostics().contains(FAKE_LOCALIZATION_ERROR));
}
finally {
if (wc != null) {
wc.finished();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A kill request delivered while the container is still LOCALIZING moves it
 * to KILLING, records KILLED_BY_RESOURCEMANAGER as the exit status, and
 * notes the kill request in the diagnostics.
 */
@Test public void testKillOnLocalizing() throws Exception {
WrappedContainer wrapped=null;
try {
wrapped=new WrappedContainer(14,314159265358979L,4344,"yak");
wrapped.initContainer();
assertEquals(ContainerState.LOCALIZING,wrapped.c.getContainerState());
wrapped.killContainer();
assertEquals(ContainerState.KILLING,wrapped.c.getContainerState());
assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,wrapped.c.cloneAndGetContainerStatus().getExitStatus());
assertTrue(wrapped.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest"));
}
finally {
if (wrapped != null) {
wrapped.finished();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Killing a container that is still in the NEW state short-circuits it
 * directly to DONE, with exit status KILLED_BY_RESOURCEMANAGER and a
 * "KillRequest" note in the diagnostics.
 */
@Test public void testKillOnNew() throws Exception {
WrappedContainer target=null;
try {
target=new WrappedContainer(13,314159265358979L,4344,"yak");
assertEquals(ContainerState.NEW,target.c.getContainerState());
target.killContainer();
assertEquals(ContainerState.DONE,target.c.getContainerState());
assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,target.c.cloneAndGetContainerStatus().getExitStatus());
assertTrue(target.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest"));
}
finally {
if (target != null) {
target.finished();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify container launch when all resources already cached: after
 * localization the container is LOCALIZED, its localized-resource map
 * matches the paths reported by the localizer exactly (no extras, none
 * missing), and a launch event for this container reaches the launcher bus.
 */
@Test public void testLocalizationLaunch() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(8,314159265358979L,4344,"yak");
assertEquals(ContainerState.NEW,wc.c.getContainerState());
wc.initContainer();
// NOTE(review): generic parameters appear stripped in this copy
// ("Map>", "Entry>"); presumably Map<Path,List<String>> — confirm
// against the upstream source before compiling.
Map> localPaths=wc.localizeResources();
assertEquals(ContainerState.LOCALIZED,wc.c.getContainerState());
assertNotNull(wc.c.getLocalizedResources());
// Each localized resource must correspond to one reported path...
for ( Entry> loc : wc.c.getLocalizedResources().entrySet()) {
assertEquals(localPaths.remove(loc.getKey()),loc.getValue());
}
// ...and no reported path may be left unmatched.
assertTrue(localPaths.isEmpty());
final WrappedContainer wcf=wc;
// Matches any ContainersLauncherEvent carrying this exact container.
ArgumentMatcher matchesContainerLaunch=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainersLauncherEvent launchEvent=(ContainersLauncherEvent)o;
return wcf.c == launchEvent.getContainer();
}
}
;
verify(wc.launcherBus).handle(argThat(matchesContainerLaunch));
}
finally {
if (wc != null) {
wc.finished();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=20000) public void testInvalidEnvSyntaxDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
Map> resources=new HashMap>();
FileOutputStream fos=new FileOutputStream(shellFile);
FileUtil.setExecutable(shellFile,true);
Map env=new HashMap();
env.put("APPLICATION_WORKFLOW_CONTEXT","{\"workflowId\":\"609f91c5cd83\"," + "\"workflowName\":\"\n\ninsert table " + "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, ");
List commands=new ArrayList();
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
Map cmdEnv=new HashMap();
cmdEnv.put("LANG","C");
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir,cmdEnv);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? "is not recognized as an internal or external command" : "command not found"));
Assert.assertTrue(shexc.getExitCode() != 0);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=20000) public void testInvalidSymlinkDiagnostics() throws IOException {
File shellFile=null;
File tempFile=null;
String symLink=Shell.WINDOWS ? "test.cmd" : "test";
File symLinkFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
tempFile=Shell.appendScriptExtension(tmpDir,"temp");
String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\"";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(timeoutCommand);
writer.close();
Map> resources=new HashMap>();
Path invalidPath=new Path(shellFile.getAbsolutePath() + "randomPath");
resources.put(invalidPath,Arrays.asList(symLink));
FileOutputStream fos=new FileOutputStream(tempFile);
Map env=new HashMap();
List commands=new ArrayList();
if (Shell.WINDOWS) {
commands.add("cmd");
commands.add("/c");
commands.add("\"" + symLink + "\"");
}
else {
commands.add("/bin/sh ./\\\"" + symLink + "\\\"");
}
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile,true);
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertNotNull(diagnostics);
Assert.assertTrue(shexc.getExitCode() != 0);
symLinkFile=new File(tmpDir,symLink);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
if (tempFile != null && tempFile.exists()) {
tempFile.delete();
}
if (symLinkFile != null && symLinkFile.exists()) {
symLinkFile.delete();
}
}
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ContainerLaunch.call() invoked on a container whose localized resources
 * are null must not throw; instead it should emit a ContainerExitEvent of
 * type CONTAINER_EXITED_WITH_FAILURE. The assertions live inside the
 * stubbed event handler that the mocked dispatcher hands back to the
 * launch.
 */
@SuppressWarnings("rawtypes") @Test(timeout=10000) public void testCallFailureWithNullLocalizedResources(){
Container container=mock(Container.class);
when(container.getContainerId()).thenReturn(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(System.currentTimeMillis(),1),1),1));
ContainerLaunchContext clc=mock(ContainerLaunchContext.class);
when(clc.getCommands()).thenReturn(Collections.emptyList());
when(container.getLaunchContext()).thenReturn(clc);
// The condition under test: no localized resources available.
when(container.getLocalizedResources()).thenReturn(null);
Dispatcher dispatcher=mock(Dispatcher.class);
// Handler asserts that the only event emitted is an exit-with-failure.
EventHandler eventHandler=new EventHandler(){
public void handle( Event event){
Assert.assertTrue(event instanceof ContainerExitEvent);
ContainerExitEvent exitEvent=(ContainerExitEvent)event;
Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,exitEvent.getType());
}
}
;
when(dispatcher.getEventHandler()).thenReturn(eventHandler);
ContainerLaunch launch=new ContainerLaunch(context,new Configuration(),dispatcher,exec,null,container,dirsHandler,containerManager);
launch.call();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=20000) public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
String command=Shell.WINDOWS ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : "echo \"hello\"; echo \"error\" 1>&2; exit 2;";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(command);
writer.close();
Map> resources=new HashMap>();
FileOutputStream fos=new FileOutputStream(shellFile,true);
Map env=new HashMap();
List commands=new ArrayList();
commands.add(command);
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains("error"));
Assert.assertTrue(shexc.getOutput().contains("hello"));
Assert.assertTrue(shexc.getExitCode() == 2);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * See if environment variable is forwarded using sanitizeEnv.
 *
 * Starts a real container whose script dumps the standard YARN environment
 * variables to a file, then verifies (a) the values read back from that
 * file and (b) the sanitized values placed into the launch context — both
 * must override every user-set value. Finally stops the container and
 * checks the process died with KILLED_BY_APPMASTER.
 *
 * Fixes: removed a duplicated liveness assertion, close the reader in a
 * finally block, and restore stripped generic parameters.
 * @throws Exception on container-manager or I/O failure
 */
@Test(timeout=60000) public void testContainerEnvVariables() throws Exception {
containerManager.start();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
// User-supplied values for NM-controlled variables; sanitizeEnv must
// overwrite every one of these.
Map<String,String> userSetEnv=new HashMap<String,String>();
userSetEnv.put(Environment.CONTAINER_ID.name(),"user_set_container_id");
userSetEnv.put(Environment.NM_HOST.name(),"user_set_NM_HOST");
userSetEnv.put(Environment.NM_PORT.name(),"user_set_NM_PORT");
userSetEnv.put(Environment.NM_HTTP_PORT.name(),"user_set_NM_HTTP_PORT");
userSetEnv.put(Environment.LOCAL_DIRS.name(),"user_set_LOCAL_DIR");
userSetEnv.put(Environment.USER.key(),"user_set_" + Environment.USER.key());
userSetEnv.put(Environment.LOGNAME.name(),"user_set_LOGNAME");
userSetEnv.put(Environment.PWD.name(),"user_set_PWD");
userSetEnv.put(Environment.HOME.name(),"user_set_HOME");
containerLaunchContext.setEnvironment(userSetEnv);
File scriptFile=Shell.appendScriptExtension(tmpDir,"scriptFile");
PrintWriter fileWriter=new PrintWriter(scriptFile);
File processStartFile=new File(tmpDir,"env_vars.txt").getAbsoluteFile();
// The script echoes each env var (one per line), then the aux-service
// payloads, then its pid/container-id, and finally blocks so the
// container stays alive until explicitly stopped.
if (Shell.WINDOWS) {
fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "+ processStartFile);
fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.USER.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.PWD.$() + ">> "+ processStartFile);
fileWriter.println("@echo " + Environment.HOME.$() + ">> "+ processStartFile);
for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ "%>> "+ processStartFile);
}
fileWriter.println("@echo " + cId + ">> "+ processStartFile);
fileWriter.println("@ping -n 100 127.0.0.1 >nul");
}
else {
fileWriter.write("\numask 0");
fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.USER.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.PWD.name() + " >> "+ processStartFile);
fileWriter.write("\necho $" + Environment.HOME.name() + " >> "+ processStartFile);
for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ " >> "+ processStartFile);
}
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nexec sleep 100");
}
fileWriter.close();
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile="dest_file";
Map<String,LocalResource> localResources=new HashMap<String,LocalResource>();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
List<String> commands=Arrays.asList(Shell.getRunScriptCommand(scriptFile));
containerLaunchContext.setCommands(commands);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,Priority.newInstance(0),0));
List<StartContainerRequest> list=new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
// Poll up to ~20s for the script to write its output file.
int timeoutSecs=0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
Thread.sleep(1000);
LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists());
List<String> localDirs=dirsHandler.getLocalDirs();
List<String> logDirs=dirsHandler.getLogDirs();
// Expected app-cache dirs: <localDir>/usercache/<user>/appcache/<appId>
List<Path> appDirs=new ArrayList<Path>(localDirs.size());
for ( String localDir : localDirs) {
Path usersdir=new Path(localDir,ContainerLocalizer.USERCACHE);
Path userdir=new Path(usersdir,user);
Path appsdir=new Path(userdir,ContainerLocalizer.APPCACHE);
appDirs.add(new Path(appsdir,appId.toString()));
}
List<String> containerLogDirs=new ArrayList<String>();
String relativeContainerLogDir=ContainerLaunch.getRelativeContainerLogDir(appId.toString(),cId.toString());
for ( String logDir : logDirs) {
containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
}
// Read back the values the running container actually saw.
String pid=null;
BufferedReader reader=new BufferedReader(new FileReader(processStartFile));
try {
Assert.assertEquals(cId.toString(),reader.readLine());
Assert.assertEquals(context.getNodeId().getHost(),reader.readLine());
Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),reader.readLine());
Assert.assertEquals(String.valueOf(HTTP_PORT),reader.readLine());
Assert.assertEquals(StringUtils.join(",",appDirs),reader.readLine());
Assert.assertEquals(user,reader.readLine());
Assert.assertEquals(user,reader.readLine());
String obtainedPWD=reader.readLine();
boolean found=false;
for ( Path localDir : appDirs) {
if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) {
found=true;
break;
}
}
Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found);
Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),reader.readLine());
for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName),ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
}
// The sanitized launch-context env must match the same expectations.
Assert.assertEquals(cId.toString(),containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
Assert.assertEquals(context.getNodeId().getHost(),containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
Assert.assertEquals(String.valueOf(HTTP_PORT),containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
Assert.assertEquals(StringUtils.join(",",appDirs),containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
Assert.assertEquals(StringUtils.join(",",containerLogDirs),containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.USER.name()));
Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
found=false;
String obtainedCtxPWD=containerLaunchContext.getEnvironment().get(Environment.PWD.name());
for ( Path localDir : appDirs) {
if (new Path(localDir,cId.toString()).toString().equals(obtainedCtxPWD)) {
found=true;
break;
}
}
Assert.assertTrue("Wrong local-dir found : " + obtainedCtxPWD,found);
Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
pid=reader.readLine().trim();
// No further lines expected after the pid.
Assert.assertEquals(null,reader.readLine());
}
finally {
reader.close();
}
// (duplicate liveness assertion removed — one check suffices)
Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid));
List<ContainerId> containerIds=new ArrayList<ContainerId>();
containerIds.add(cId);
StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds);
containerManager.stopContainers(stopRequest);
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
int expectedExitCode=ContainerExitStatus.KILLED_BY_APPMASTER;
Assert.assertEquals(expectedExitCode,containerStatus.getExitStatus());
Assert.assertFalse("Process is still alive!",DefaultContainerExecutor.containerIsAlive(pid));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises LocalCacheDirectoryManager's hierarchical sub-directory layout
 * with at most 37 files per directory. The loop reproduces the expected
 * relative path for every one of 37*36*36 allocations as a base-36 string
 * with one character per directory level, and checks it against what the
 * manager hands out. Finally, decrementing the file count of two known
 * paths must make exactly those paths be handed out next.
 */
@Test(timeout=10000) public void testHierarchicalSubDirectoryCreation(){
YarnConfiguration conf=new YarnConfiguration();
// Cap each cache directory at 37 files.
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
LocalCacheDirectoryManager hDir=new LocalCacheDirectoryManager(conf);
// The very first allocation lands in the root (empty relative path).
Assert.assertTrue(hDir.getRelativePathForLocalization().isEmpty());
for (int i=1; i <= 37 * 36 * 36; i++) {
StringBuffer sb=new StringBuffer();
String num=Integer.toString(i - 1,36);
if (num.length() == 1) {
// Single base-36 digit: used directly as the first level.
sb.append(num.charAt(0));
}
else {
// Multi-digit: the leading digit is offset by one at the first level.
sb.append(Integer.toString(Integer.parseInt(num.substring(0,1),36) - 1,36));
}
// Remaining base-36 digits become nested sub-directory levels.
for (int j=1; j < num.length(); j++) {
sb.append(Path.SEPARATOR).append(num.charAt(j));
}
Assert.assertEquals(sb.toString(),hDir.getRelativePathForLocalization());
}
String testPath1="4";
String testPath2="2";
// Freeing a slot in a directory makes it the next path handed out, in
// the order the slots were freed.
hDir.decrementFileCountForPath(testPath1);
hDir.decrementFileCountForPath(testPath2);
Assert.assertEquals(testPath1,hDir.getRelativePathForLocalization());
Assert.assertEquals(testPath2,hDir.getRelativePathForLocalization());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies incrementFileCountForPath/decrementFileCountForPath bookkeeping
 * in LocalCacheDirectoryManager: manually incrementing a path's file count
 * consumes an allocation slot (so the directory fills earlier), while
 * decrementing frees slots that are handed out again before any new
 * sub-directory is created.
 */
@Test public void testIncrementFileCountForPath(){
YarnConfiguration conf=new YarnConfiguration();
// Room for DIRECTORIES_PER_LEVEL sub-dirs plus 2 files per directory.
conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2);
LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf);
final String rootPath="";
// Consume one of the root's file slots up front.
mgr.incrementFileCountForPath(rootPath);
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
// Root is now full: the next allocation must leave the root.
Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization()));
mgr.getRelativePathForLocalization();
// Free two root slots; they should be handed out again next.
mgr.decrementFileCountForPath(rootPath);
mgr.decrementFileCountForPath(rootPath);
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
String otherDir=mgr.getRelativePathForLocalization();
Assert.assertFalse("root dir should be full",otherDir.equals(rootPath));
final String deepDir0="d/e/e/p/0";
final String deepDir1="d/e/e/p/1";
final String deepDir2="d/e/e/p/2";
final String deepDir3="d/e/e/p/3";
// Incrementing a deep path makes the manager aware of that hierarchy...
mgr.incrementFileCountForPath(deepDir0);
Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization());
Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization());
Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization());
// ...and further increments advance allocation to the next free sibling.
mgr.incrementFileCountForPath(deepDir2);
mgr.incrementFileCountForPath(deepDir1);
mgr.incrementFileCountForPath(deepDir2);
Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * LocalResourceRequest ordering: requests that differ in path, timestamp,
 * type, or pattern must not compare equal; with all other attributes equal,
 * a lexically smaller path or an older timestamp sorts first. The random
 * seed is printed so a failing run can be reproduced.
 */
@Test public void testResourceOrder() throws URISyntaxException {
Random rand=new Random();
long seed=rand.nextLong();
rand.setSeed(seed);
System.out.println("SEED: " + seed);
long basetime=rand.nextLong() >>> 2;
org.apache.hadoop.yarn.api.records.LocalResource refYarnResource=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,FILE,PUBLIC,"^/foo/.*");
final LocalResourceRequest reference=new LocalResourceRequest(refYarnResource);
// Differs only in path (lexically larger) -> reference sorts first.
org.apache.hadoop.yarn.api.records.LocalResource otherYarnResource=getYarnResource(new Path("http://yak.org:80/foobaz"),-1,basetime,FILE,PUBLIC,"^/foo/.*");
LocalResourceRequest other=new LocalResourceRequest(otherYarnResource);
assertTrue(0 > reference.compareTo(other));
// Differs only in timestamp (newer) -> reference sorts first.
otherYarnResource=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime + 1,FILE,PUBLIC,"^/foo/.*");
other=new LocalResourceRequest(otherYarnResource);
assertTrue(0 > reference.compareTo(other));
// Differs only in resource type -> must not compare equal.
otherYarnResource=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,"^/foo/.*");
other=new LocalResourceRequest(otherYarnResource);
assertTrue(0 != reference.compareTo(other));
// Differs in type and pattern -> must not compare equal.
otherYarnResource=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,"^/food/.*");
other=new LocalResourceRequest(otherYarnResource);
assertTrue(0 != reference.compareTo(other));
// Differs in type with a null pattern -> must not compare equal.
otherYarnResource=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,null);
other=new LocalResourceRequest(otherYarnResource);
assertTrue(0 != reference.compareTo(other));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Successful localization must be recorded in the NM state store:
 * startResourceLocalization when the target path is assigned,
 * finishResourceLocalization when the resource lands, and
 * removeLocalizedResource when the resource is removed from the tracker.
 * The captured proto/path arguments are checked against the request.
 *
 * NOTE(review): generic parameters appear stripped in this copy
 * (raw EventHandler / ArgumentCaptor) — confirm against upstream source.
 */
@Test @SuppressWarnings("unchecked") public void testStateStoreSuccessfulLocalization() throws Exception {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(1,1);
final Path localDir=new Path("/tmp");
Configuration conf=new YarnConfiguration();
DrainDispatcher dispatcher=null;
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
NMStateStoreService stateStore=mock(NMStateStoreService.class);
try {
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore);
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.APPLICATION,lc1);
tracker.handle(reqEvent1);
dispatcher.await();
// Assigning the localization path must trigger startResourceLocalization
// with the request's proto and a path under the hierarchical directory.
Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir);
ArgumentCaptor localResourceCaptor=ArgumentCaptor.forClass(LocalResourceProto.class);
ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class);
verify(stateStore).startResourceLocalization(eq(user),eq(appId),localResourceCaptor.capture(),pathCaptor.capture());
LocalResourceProto lrProto=localResourceCaptor.getValue();
Path localizedPath1=pathCaptor.getValue();
Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(lrProto)));
Assert.assertEquals(hierarchicalPath1,localizedPath1.getParent());
// Completing localization must record the finished resource and path.
ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,pathCaptor.getValue(),120);
tracker.handle(rle1);
dispatcher.await();
ArgumentCaptor localizedProtoCaptor=ArgumentCaptor.forClass(LocalizedResourceProto.class);
verify(stateStore).finishResourceLocalization(eq(user),eq(appId),localizedProtoCaptor.capture());
LocalizedResourceProto localizedProto=localizedProtoCaptor.getValue();
Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(localizedProto.getResource())));
Assert.assertEquals(localizedPath1.toString(),localizedProto.getLocalPath());
LocalizedResource localizedRsrc1=tracker.getLocalizedResource(lr1);
Assert.assertNotNull(localizedRsrc1);
// Release the only reference, then remove; removal must be persisted.
tracker.handle(new ResourceReleaseEvent(lr1,cId1));
dispatcher.await();
boolean removeResult=tracker.remove(localizedRsrc1,mockDelService);
Assert.assertTrue(removeResult);
verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(localizedPath1));
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Reference counting in LocalResourcesTrackerImpl: two containers request
 * the same public resource (ref count 2) while one of them also requests a
 * second resource; releases decrement the counts, a resource that is still
 * referenced and not yet localized cannot be removed, and a LOCALIZED
 * resource whose ref count has dropped to 0 can.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void test(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2);
LocalizerContext lc2=new LocalizerContext(user,cId2,null);
LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
LocalResourceRequest req2=createLocalResourceRequest(user,2,1,LocalResourceVisibility.PUBLIC);
LocalizedResource lr1=createLocalizedResource(req1,dispatcher);
LocalizedResource lr2=createLocalizedResource(req2,dispatcher);
// Pre-seed the tracker's map so lr1/lr2 are already-known resources.
ConcurrentMap localrsrc=new ConcurrentHashMap();
localrsrc.put(req1,lr1);
localrsrc.put(req2,lr2);
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService());
// req1 is requested by both containers, req2 by container 1 only.
ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1);
ResourceEvent req12Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc2);
ResourceEvent req21Event=new ResourceRequestEvent(req2,LocalResourceVisibility.PUBLIC,lc1);
ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1);
ResourceEvent rel12Event=new ResourceReleaseEvent(req1,cId2);
ResourceEvent rel21Event=new ResourceReleaseEvent(req2,cId1);
tracker.handle(req11Event);
tracker.handle(req12Event);
tracker.handle(req21Event);
dispatcher.await();
verify(localizerEventHandler,times(3)).handle(any(LocalizerResourceRequestEvent.class));
Assert.assertEquals(2,lr1.getRefCount());
Assert.assertEquals(1,lr2.getRefCount());
tracker.handle(rel21Event);
dispatcher.await();
verifyTrackedResourceCount(tracker,2);
// lr1 is still referenced (and not LOCALIZED): removal must be refused.
Assert.assertEquals(2,lr1.getRefCount());
Assert.assertFalse(tracker.remove(lr1,mockDelService));
verifyTrackedResourceCount(tracker,2);
ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1);
lr1.handle(rle);
Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
// Drop both references; now removal succeeds.
tracker.handle(rel11Event);
tracker.handle(rel12Event);
Assert.assertEquals(0,lr1.getRefCount());
Assert.assertTrue(tracker.remove(lr1,mockDelService));
verifyTrackedResourceCount(tracker,1);
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises ref-counting in a cache-managed {@code LocalResourcesTrackerImpl}:
 * request events add container references, a localization failure evicts the
 * entry and notifies every referencing container, a later request re-creates
 * the entry, and releases only affect containers that actually hold a ref.
 */
@Test(timeout=1000) @SuppressWarnings("unchecked") public void testLocalResourceCache(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
dispatcher=createDispatcher(conf);
// Mock sinks for localizer/container events so dispatches can be verified.
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
ConcurrentMap localrsrc=new ConcurrentHashMap();
// useLocalCacheDirectoryManager=true: tracker acts as a shared cache.
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService());
LocalResourceRequest lr=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ResourceEvent reqEvent1=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc1);
Assert.assertEquals(0,localrsrc.size());
// First request creates the tracked resource with a single reference.
tracker.handle(reqEvent1);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1));
Assert.assertEquals(ResourceState.DOWNLOADING,localrsrc.get(lr).getState());
// A second container requesting the same resource bumps the ref count.
ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2);
LocalizerContext lc2=new LocalizerContext(user,cId2,null);
ResourceEvent reqEvent2=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc2);
tracker.handle(reqEvent2);
dispatcher.await();
Assert.assertEquals(2,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2));
// Localization failure evicts the entry and fails both referencing containers.
ResourceEvent resourceFailedEvent=new ResourceFailedLocalizationEvent(lr,(new Exception("test").getMessage()));
LocalizedResource localizedResource=localrsrc.get(lr);
tracker.handle(resourceFailedEvent);
dispatcher.await();
Assert.assertEquals(0,localrsrc.size());
verify(containerEventHandler,times(2)).handle(isA(ContainerResourceFailedEvent.class));
Assert.assertEquals(ResourceState.FAILED,localizedResource.getState());
// Releasing a ref for the already-evicted resource must be harmless.
ResourceReleaseEvent relEvent1=new ResourceReleaseEvent(lr,cId1);
tracker.handle(relEvent1);
dispatcher.await();
// A fresh request after the failure re-creates the entry with one ref.
ContainerId cId3=BuilderUtils.newContainerId(1,1,1,3);
LocalizerContext lc3=new LocalizerContext(user,cId3,null);
ResourceEvent reqEvent3=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc3);
tracker.handle(reqEvent3);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Releasing cId2 (which holds no ref any more) must not disturb cId3's ref.
ResourceReleaseEvent relEvent2=new ResourceReleaseEvent(lr,cId2);
tracker.handle(relEvent2);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Successful localization notifies the remaining container exactly once.
Path localizedPath=new Path("/tmp/file1");
ResourceLocalizedEvent localizedEvent=new ResourceLocalizedEvent(lr,localizedPath,123L);
tracker.handle(localizedEvent);
dispatcher.await();
verify(containerEventHandler,times(1)).handle(isA(ContainerResourceLocalizedEvent.class));
Assert.assertEquals(ResourceState.LOCALIZED,localrsrc.get(lr).getState());
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
// Final release drops the ref count to zero; the entry stays cached.
ResourceReleaseEvent relEvent3=new ResourceReleaseEvent(lr,cId3);
tracker.handle(relEvent3);
dispatcher.await();
Assert.assertEquals(0,localrsrc.get(lr).getRefCount());
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Verifies hierarchical cache-directory allocation by the tracker when the
 * per-directory file limit is small: successive localizations get distinct
 * sub-directories, a failed localization frees its slot, and resources with a
 * zero ref count can be removed while in-use ones cannot.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testHierarchicalLocalCacheDirectories(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
// Small per-directory file limit so sub-directories appear quickly.
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
ConcurrentMap localrsrc=new ConcurrentHashMap();
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService());
Path localDir=new Path("/tmp");
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
// First resource: request, obtain a localization path, then localize.
LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent1);
Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir).getParent();
ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "file1"),120);
tracker.handle(rle1);
// Second resource fails to localize; its directory slot should be reusable.
LocalResourceRequest lr2=createLocalResourceRequest(user,3,3,LocalResourceVisibility.PUBLIC);
ResourceEvent reqEvent2=new ResourceRequestEvent(lr2,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent2);
Path hierarchicalPath2=tracker.getPathForLocalization(lr2,localDir).getParent();
ResourceFailedLocalizationEvent rfe2=new ResourceFailedLocalizationEvent(lr2,new Exception("Test").toString());
tracker.handle(rfe2);
// NOTE(review): assertNotSame compares object identity; two distinct Path
// instances always satisfy it even if they denote the same directory.
// Confirm whether a value-(in)equality assertion was intended here.
Assert.assertNotSame(hierarchicalPath1,hierarchicalPath2);
LocalResourceRequest lr3=createLocalResourceRequest(user,2,2,LocalResourceVisibility.PUBLIC);
ResourceEvent reqEvent3=new ResourceRequestEvent(lr3,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent3);
Path hierarchicalPath3=tracker.getPathForLocalization(lr3,localDir).getParent();
ResourceLocalizedEvent rle3=new ResourceLocalizedEvent(lr3,new Path(hierarchicalPath3.toUri().toString() + Path.SEPARATOR + "file3"),120);
tracker.handle(rle3);
// Third resource lands in sub-directory "0" under the first path.
Assert.assertEquals(hierarchicalPath3.toUri().toString(),hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0");
// Release lr1 so it becomes removable (ref count drops to zero).
ResourceEvent relEvent1=new ResourceReleaseEvent(lr1,cId1);
tracker.handle(relEvent1);
// Count tracked resources: lr2 failed, so only lr1 and lr3 remain.
int resources=0;
Iterator iter=tracker.iterator();
while (iter.hasNext()) {
iter.next();
resources++;
}
Assert.assertEquals(2,resources);
// Remove every resource with a zero ref count; exactly one (lr1) qualifies.
iter=tracker.iterator();
while (iter.hasNext()) {
LocalizedResource rsrc=iter.next();
if (rsrc.getRefCount() == 0) {
Assert.assertTrue(tracker.remove(rsrc,mockDelService));
resources--;
}
}
Assert.assertEquals(1,resources);
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies tracker consistency when a localized file disappears from disk:
 * re-requesting the resource after its backing file was deleted must yield a
 * new {@code LocalizedResource} instance rather than reusing the stale one.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testConsistency(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
// Seed the tracker map with a pre-existing resource for req1.
LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
LocalizedResource lr1=createLocalizedResource(req1,dispatcher);
ConcurrentMap localrsrc=new ConcurrentHashMap();
localrsrc.put(req1,lr1);
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService());
ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1);
ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1);
tracker.handle(req11Event);
dispatcher.await();
Assert.assertEquals(1,lr1.getRefCount());
dispatcher.await();
verifyTrackedResourceCount(tracker,1);
// Mark the resource localized at file:///tmp/r1 and create that file.
ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1);
lr1.handle(rle);
Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1")));
LocalizedResource rsrcbefore=tracker.iterator().next();
// Delete the backing file to simulate external removal from disk.
File resFile=new File(lr1.getLocalPath().toUri().getRawPath().toString());
Assert.assertTrue(resFile.exists());
Assert.assertTrue(resFile.delete());
// Re-request: the tracker should notice the missing file and start over.
tracker.handle(req11Event);
dispatcher.await();
lr1.handle(rle);
Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
LocalizedResource rsrcafter=tracker.iterator().next();
// Identity comparison on purpose: a NEW instance must have been created.
if (rsrcbefore == rsrcafter) {
Assert.fail("Localized resource should not be equal");
}
tracker.handle(rel11Event);
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that when two localizers race to download the same PRIVATE
 * resource, only one is scheduled to fetch it (guarded by the resource's
 * semaphore), and that after the first attempt fails the resource is not
 * rescheduled to the second localizer.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPrivateResource() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
// Single NM-local dir so both localizers share the same cache layout.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
LocalResourceRequest req=new LocalResourceRequest(new Path("file:///tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
// First container/localizer requests the resource.
ContainerImpl container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
LocalizerRunner localizerRunner1=rls.getLocalizerRunner(localizerId1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,1,200));
// Second container/localizer requests the SAME resource concurrently.
ContainerImpl container2=createMockContainer(user,2);
String localizerId2=container2.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId2,rls.new LocalizerRunner(new LocalizerContext(user,container2.getContainerId(),null),localizerId2));
LocalizerRunner localizerRunner2=rls.getLocalizerRunner(localizerId2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId2,1,200));
LocalResourcesTracker tracker=rls.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId);
LocalizedResource lr=tracker.getLocalizedResource(req);
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
// One permit available: no localizer has claimed the download yet.
Assert.assertEquals(1,lr.sem.availablePermits());
// First heartbeat wins the permit and is handed the resource spec.
LocalizerHeartbeatResponse response1=rls.heartbeat(createLocalizerStatus(localizerId1));
Assert.assertEquals(1,localizerRunner1.scheduled.size());
Assert.assertEquals(req.getResource(),response1.getResourceSpecs().get(0).getResource().getResource());
Assert.assertEquals(0,lr.sem.availablePermits());
// Second heartbeat gets nothing: the permit is already taken.
LocalizerHeartbeatResponse response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
// Report the first attempt as failed; the resource transitions to FAILED.
rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1,req));
Assert.assertTrue(waitForResourceState(lr,rls,req,LocalResourceVisibility.PRIVATE,user,appId,ResourceState.FAILED,200));
Assert.assertTrue(lr.getState().equals(ResourceState.FAILED));
Assert.assertEquals(0,localizerRunner1.scheduled.size());
// The failed resource must NOT be rescheduled to the second localizer.
response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,localizerRunner2.pending.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that concurrent requests for the same PUBLIC resource trigger only
 * one download attempt (semaphore-guarded), and that after a localization
 * failure the resource is not re-queued for download.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception {
DrainDispatcher dispatcher1=null;
String user="testuser";
try {
// Single NM-local dir backing the public cache.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1=new DrainDispatcher();
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
// Spy the service so internal state (public localizer queue) is inspectable.
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher1,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
dispatcher1.register(LocalizationEventType.class,spyService);
spyService.init(conf);
Assert.assertEquals(0,spyService.getPublicLocalizer().pending.size());
LocalResourceRequest req=new LocalResourceRequest(new Path("/tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,"");
ApplicationImpl app=mock(ApplicationImpl.class);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
when(app.getAppId()).thenReturn(appId);
when(app.getUser()).thenReturn(user);
dispatcher1.getEventHandler().handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
// First container's request starts the single public download.
ContainerImpl container1=createMockContainer(user,1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PUBLIC,req));
Assert.assertTrue(waitForResourceState(null,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.DOWNLOADING,200));
Assert.assertTrue(waitForPublicDownloadToStart(spyService,1,200));
LocalizedResource lr=getLocalizedResource(spyService,req,LocalResourceVisibility.PUBLIC,user,null);
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
Assert.assertEquals(1,spyService.getPublicLocalizer().pending.size());
// Permit consumed: the download is claimed.
Assert.assertEquals(0,lr.sem.availablePermits());
// Second container's request must NOT start another download attempt.
ContainerImpl container2=createMockContainer(user,2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PUBLIC,req));
Assert.assertFalse(waitForPublicDownloadToStart(spyService,2,100));
// Fail the localization; the resource transitions to FAILED.
ResourceFailedLocalizationEvent locFailedEvent=new ResourceFailedLocalizationEvent(req,new Exception("test").toString());
spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,null).handle(locFailedEvent);
Assert.assertTrue(waitForResourceState(lr,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.FAILED,200));
// Even with the lock released and the queue cleared, a new request for the
// FAILED resource must not be queued for download again.
lr.unlock();
spyService.getPublicLocalizer().pending.clear();
LocalizerResourceRequestEvent localizerEvent=new LocalizerResourceRequestEvent(lr,null,mock(LocalizerContext.class),null);
dispatcher1.getEventHandler().handle(localizerEvent);
Assert.assertFalse(waitForPublicDownloadToStart(spyService,1,100));
Assert.assertEquals(1,lr.sem.availablePermits());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that heartbeat responses direct PRIVATE resources into the user's
 * filecache and APPLICATION resources into the per-application appcache, by
 * checking the destination directory of every returned resource spec.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
// Single NM-local dir so expected cache paths are deterministic.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
Container container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
// One PRIVATE and one APPLICATION resource requested by the same container.
LocalResourceRequest reqPriv=new LocalResourceRequest(new Path("file:///tmp1"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
List privList=new ArrayList();
privList.add(reqPriv);
LocalResourceRequest reqApp=new LocalResourceRequest(new Path("file:///tmp2"),123L,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,"");
List appList=new ArrayList();
appList.add(reqApp);
Map> rsrcs=new HashMap>();
rsrcs.put(LocalResourceVisibility.APPLICATION,appList);
rsrcs.put(LocalResourceVisibility.PRIVATE,privList);
dispatcher1.getEventHandler().handle(new ContainerLocalizationRequestEvent(container1,rsrcs));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,2,500));
// Expected parents: <localdir>/usercache/<user>/filecache and
// <localdir>/usercache/<user>/appcache/<appId>/filecache.
String userCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.FILECACHE));
String userAppCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.APPCACHE,appId.toString(),ContainerLocalizer.FILECACHE));
// Heartbeat until both resource specs have been handed out, validating
// each spec's destination directory against its visibility.
int returnedResources=0;
boolean appRsrc=false, privRsrc=false;
while (returnedResources < 2) {
LocalizerHeartbeatResponse response=rls.heartbeat(createLocalizerStatus(localizerId1));
for ( ResourceLocalizationSpec resourceSpec : response.getResourceSpecs()) {
returnedResources++;
Path destinationDirectory=new Path(resourceSpec.getDestinationDirectory().getFile());
if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) {
appRsrc=true;
Assert.assertEquals(userAppCachePath,destinationDirectory.getParent().toUri().toString());
}
else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) {
privRsrc=true;
Assert.assertEquals(userCachePath,destinationDirectory.getParent().toUri().toString());
}
else {
throw new Exception("Unexpected resource recevied.");
}
}
}
// Both visibilities must have been observed.
Assert.assertTrue(appRsrc && privRsrc);
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drives a full localizer heartbeat conversation for two PRIVATE resources:
 * each heartbeat hands out one resource spec with a hierarchical destination
 * path, an empty round follows once all resources are out, the next round
 * returns DIE, and the localizer token file is scheduled for deletion.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalizationHeartbeat() throws Exception {
// One NM-local dir; small per-directory limit forces hierarchical subdirs.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
localDirs.add(lfs.makeQualified(new Path(basedir,0 + "")));
sDirs[0]=localDirs.get(0).toString();
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
// Spy on a real DeletionService so delete() calls can be verified.
DeletionService delServiceReal=new DeletionService(exec);
DeletionService delService=spy(delServiceReal);
delService.init(new Configuration());
delService.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
// Avoid binding a real RPC server / touching the real filesystem.
doReturn(mockServer).when(spyService).createServer();
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
try {
spyService.init(conf);
spyService.start();
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn("user0");
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
// Matcher: APPLICATION_INITED event carrying exactly this appId.
ArgumentMatcher matchesAppInit=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ApplicationEvent evt=(ApplicationEvent)o;
return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID();
}
}
;
dispatcher.await();
verify(applicationBus).handle(argThat(matchesAppInit));
// Randomized resources; seed is printed so failures are reproducible.
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final Container c=getMockContainer(appId,42,"user0");
FSDataOutputStream out=new FSDataOutputStream(new DataOutputBuffer(),null);
doReturn(out).when(spylfs).createInternal(isA(Path.class),isA(EnumSet.class),isA(FsPermission.class),anyInt(),anyShort(),anyLong(),isA(Progressable.class),isA(ChecksumOpt.class),anyBoolean());
final LocalResource resource1=getPrivateMockedResource(r);
LocalResource resource2=null;
// Ensure the two random resources are distinct.
do {
resource2=getPrivateMockedResource(r);
}
while (resource2 == null || resource2.equals(resource1));
final LocalResourceRequest req1=new LocalResourceRequest(resource1);
final LocalResourceRequest req2=new LocalResourceRequest(resource2);
Map> rsrcs=new HashMap>();
List privateResourceList=new ArrayList();
privateResourceList.add(req1);
privateResourceList.add(req2);
rsrcs.put(LocalResourceVisibility.PRIVATE,privateResourceList);
spyService.handle(new ContainerLocalizationRequestEvent(c,rsrcs));
// Give the service time to spawn the localizer before verifying.
Thread.sleep(1000);
dispatcher.await();
String appStr=ConverterUtils.toString(appId);
String ctnrStr=c.getContainerId().toString();
// Capture the token path passed to startLocalizer for later verification.
ArgumentCaptor tokenPathCaptor=ArgumentCaptor.forClass(Path.class);
verify(exec).startLocalizer(tokenPathCaptor.capture(),isA(InetSocketAddress.class),eq("user0"),eq(appStr),eq(ctnrStr),isA(List.class),isA(List.class));
Path localizationTokenPath=tokenPathCaptor.getValue();
// Stub localizer status: empty, then success for rsrc1, rsrc2, then empty.
LocalResourceStatus rsrcStat1=mock(LocalResourceStatus.class);
LocalResourceStatus rsrcStat2=mock(LocalResourceStatus.class);
LocalizerStatus stat=mock(LocalizerStatus.class);
when(stat.getLocalizerId()).thenReturn(ctnrStr);
when(rsrcStat1.getResource()).thenReturn(resource1);
when(rsrcStat2.getResource()).thenReturn(resource2);
when(rsrcStat1.getLocalSize()).thenReturn(4344L);
when(rsrcStat2.getLocalSize()).thenReturn(2342L);
URL locPath=getPath("/cache/private/blah");
when(rsrcStat1.getLocalPath()).thenReturn(locPath);
when(rsrcStat2.getLocalPath()).thenReturn(locPath);
when(rsrcStat1.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(rsrcStat2.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(stat.getResources()).thenReturn(Collections.emptyList()).thenReturn(Collections.singletonList(rsrcStat1)).thenReturn(Collections.singletonList(rsrcStat2)).thenReturn(Collections.emptyList());
String localPath=Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR+ "user0"+ Path.SEPARATOR+ ContainerLocalizer.FILECACHE;
// Heartbeat 1: LIVE, hands out req1 into directory "10".
LocalizerHeartbeatResponse response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req1,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
URL localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "10"));
// Heartbeat 2: LIVE, hands out req2 into hierarchical subdir "0/11".
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req2,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "0"+ Path.SEPARATOR+ "11"));
// Heartbeat 3: LIVE with nothing left to hand out.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(0,response.getResourceSpecs().size());
// Heartbeat 4: localizer is told to DIE.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.DIE,response.getLocalizerAction());
dispatcher.await();
// Matcher: RESOURCE_LOCALIZED events for exactly this container.
ArgumentMatcher matchesContainerLoc=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainerEvent evt=(ContainerEvent)o;
return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID();
}
}
;
// One localization event per resource.
verify(containerBus,times(2)).handle(argThat(matchesContainerLoc));
// The localizer token file must be scheduled for deletion after DIE.
verify(delService).delete((String)isNull(),eq(localizationTokenPath));
}
finally {
spyService.stop();
dispatcher.stop();
delService.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that ResourceRetentionSet evicts enough unreferenced resources to
 * approach its retention target without over-deleting: the combined size of
 * everything removed must be at least the required reclaim amount but stay
 * below the total size of the eviction candidates plus one extra resource.
 */
@Test public void testRsrcUnused(){
DeletionService deletionService=mock(DeletionService.class);
// Retention target of 10 MiB, expressed in bytes.
long retentionTargetBytes=10 << 20;
ResourceRetentionSet retentionSet=new ResourceRetentionSet(deletionService,retentionTargetBytes);
// Mock trackers: (user, per-resource size, total count, timestamp, refcount mix).
LocalResourcesTracker publicTracker=createMockTracker(null,3 * 1024 * 1024,2,10,5);
LocalResourcesTracker trackerUserA=createMockTracker("A",1 * 1024 * 1024,4,3,3);
LocalResourcesTracker trackerUserB=createMockTracker("B",4 * 1024 * 1024,1,10,5);
LocalResourcesTracker trackerUserC=createMockTracker("C",2 * 1024 * 1024,3,7,2);
retentionSet.addResources(publicTracker);
retentionSet.addResources(trackerUserA);
retentionSet.addResources(trackerUserB);
retentionSet.addResources(trackerUserC);
// Capture every resource each tracker was asked to remove; the per-tracker
// upper bounds equal each tracker's number of unreferenced resources.
ArgumentCaptor evicted=ArgumentCaptor.forClass(LocalizedResource.class);
verify(publicTracker,atMost(2)).remove(evicted.capture(),isA(DeletionService.class));
verify(trackerUserA,atMost(4)).remove(evicted.capture(),isA(DeletionService.class));
verify(trackerUserB,atMost(1)).remove(evicted.capture(),isA(DeletionService.class));
verify(trackerUserC,atMost(3)).remove(evicted.capture(),isA(DeletionService.class));
long deletedBytes=0L;
for ( LocalizedResource removed : evicted.getAllValues()) {
deletedBytes+=removed.getSize();
}
// Enough was reclaimed, but not excessively more than needed.
assertTrue(deletedBytes >= 10 * 1024 * 1024);
assertTrue(deletedBytes < 15 * 1024 * 1024);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that after log aggregation completes for a finished application,
 * the local per-container log files and the local app log directory are
 * deleted, the aggregated remote log file exists, and the expected
 * application lifecycle events were published.
 */
@Test @SuppressWarnings("unchecked") public void testLocalFileDeletionAfterUpload() throws Exception {
// Spy the DeletionService so delete() invocations can be verified.
this.delSrvc=new DeletionService(createContainerExecutor());
delSrvc=spy(delSrvc);
this.delSrvc.init(conf);
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
EventHandler appEventHandler=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,appEventHandler);
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
logAggregationService.start();
ApplicationId application1=BuilderUtils.newApplicationId(1234,1);
// Create a local log dir with one container's logs, then run the app
// through start -> container finished -> app finished -> service stop.
File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1));
app1LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(application1,1);
ContainerId container11=BuilderUtils.newContainerId(appAttemptId,1);
writeContainerLogs(app1LogDir,container11);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
logAggregationService.stop();
assertEquals(0,logAggregationService.getNumAggregators());
verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
// The local app log directory must have been handed to the DeletionService.
verify(delSrvc).delete(eq(user),eq((Path)null),eq(new Path(app1LogDir.getAbsolutePath())));
delSrvc.stop();
// Local per-container log files must be gone after aggregation.
String containerIdStr=ConverterUtils.toString(container11);
File containerLogDir=new File(app1LogDir,containerIdStr);
for ( String fileType : new String[]{"stdout","stderr","syslog"}) {
File f=new File(containerLogDir,fileType);
Assert.assertFalse("check " + f,f.exists());
}
Assert.assertFalse(app1LogDir.exists());
// The aggregated log must exist at the remote node log path.
Path logFilePath=logAggregationService.getRemoteNodeLogFileForApp(application1,this.user);
Assert.assertTrue("Log file [" + logFilePath + "] not found",new File(logFilePath.toUri().getPath()).exists());
dispatcher.await();
// INITED and FINISHED lifecycle events must both have been published.
ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID");
dispatcher.stop();
}
APIUtilityVerifier BooleanVerifier
/**
 * Verifies that {@code LogAggregationService.verifyAndCreateRemoteLogDir}
 * creates the remote aggregation root directory when it does not yet exist.
 * The temporary directory is removed in a finally block so it does not leak
 * into the workspace even when an assertion fails.
 */
@Test public void testVerifyAndCreateRemoteDirNonExistence() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
// Unique, not-yet-existing path to serve as the remote log root.
File aNewFile=new File(String.valueOf("tmp" + System.currentTimeMillis()));
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,aNewFile.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
try {
boolean existsBefore=aNewFile.exists();
assertTrue("The new file already exists!",!existsBefore);
logAggregationService.verifyAndCreateRemoteLogDir(this.conf);
boolean existsAfter=aNewFile.exists();
assertTrue("The new aggregate file is not successfully created",existsAfter);
}
finally {
// Clean up even if an assertion above failed; previously the delete was
// skipped on failure, leaking the directory into subsequent tests.
aNewFile.delete();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An application that starts and finishes without ever running a container
 * must not produce a remote aggregated log file, while still firing the
 * INITED and FINISHED application log-handling events.
 */
@Test @SuppressWarnings("unchecked") public void testNoContainerOnNode() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher drainDispatcher = createDispatcher();
EventHandler handler = mock(EventHandler.class);
drainDispatcher.register(ApplicationEventType.class, handler);
LogAggregationService aggService = new LogAggregationService(drainDispatcher, this.context, this.delSrvc, super.dirsHandler);
aggService.init(this.conf);
aggService.start();
ApplicationId app = BuilderUtils.newApplicationId(1234, 1);
// Create the app's local log dir, but never write any container logs into it.
File appLogDir = new File(localLogDir, ConverterUtils.toString(app));
appLogDir.mkdir();
aggService.handle(new LogHandlerAppStartedEvent(app, this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
aggService.handle(new LogHandlerAppFinishedEvent(app));
aggService.stop();
assertEquals(0, aggService.getNumAggregators());
// No containers ran, so no aggregated log file should exist remotely.
Assert.assertFalse(new File(aggService.getRemoteNodeLogFileForApp(app, this.user).toUri().getPath()).exists());
drainDispatcher.await();
ApplicationEvent[] expected = new ApplicationEvent[] {
    new ApplicationEvent(app, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
    new ApplicationEvent(app, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
checkEvents(handler, expected, true, "getType", "getApplicationID");
drainDispatcher.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify the check for whether a process tree is over limit or not.
 * @throws IOException if there was a problem setting up the fake procfs directories or
 * files.
 */
@Test public void testProcessTreeLimits() throws IOException {
File procfsRootDir=new File(localDir,"proc");
String[] pids={"100","200","300","400","500","600","700"};
try {
// Build a fake procfs tree. Each ProcessStatInfo is
// {pid, name, ppid, pgrpId, session, vmem-in-bytes}.
TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);
TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir,pids);
TestProcfsBasedProcessTree.ProcessStatInfo[] procs=new TestProcfsBasedProcessTree.ProcessStatInfo[7];
procs[0]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000"});
procs[1]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"200","proc2","1","200","200","200000"});
procs[2]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"300","proc3","200","200","200","300000"});
procs[3]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"400","proc4","200","200","200","400000"});
procs[4]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"500","proc5","100","100","100","1500000"});
procs[5]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"600","proc6","1","600","600","100000"});
procs[6]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"700","proc7","600","600","600","100000"});
TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir,pids,procs,null);
// Memory limit (bytes) the monitor enforces in this test.
long limit=700000;
ContainersMonitorImpl test=new ContainersMonitorImpl(null,null,null);
// Tree rooted at 100 includes pid 500: 100000 + 1500000 bytes, far above
// the limit, so it is flagged on the very first observation.
ProcfsBasedProcessTree pTree=new ProcfsBasedProcessTree("100",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertTrue("tree rooted at 100 should be over limit " + "after first iteration.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
// Tree rooted at 200 (pids 200, 300, 400) totals 900000 bytes: over the
// limit, but only reported after it has been seen for two iterations.
pTree=new ProcfsBasedProcessTree("200",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertFalse("tree rooted at 200 shouldn't be over limit " + "after one iteration.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
pTree.updateProcessTree();
assertTrue("tree rooted at 200 should be over limit after 2 iterations",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
// Tree rooted at 600 (pids 600, 700) totals 200000 bytes and therefore
// stays under the limit no matter how often it is observed.
pTree=new ProcfsBasedProcessTree("600",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
pTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
}
finally {
// Always remove the fake procfs directory, even on assertion failure.
FileUtil.fullyDelete(procfsRootDir);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Launches a container restricted to 8 MB of memory and verifies that the
 * containers monitor kills it: the exit status is KILLED_EXCEEDED_VMEM,
 * the diagnostics match the expected over-limit message (including a
 * process-tree dump), and the process is actually dead afterwards.
 */
@Test public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
if (!ProcfsBasedProcessTree.isAvailable()) {
// Procfs-based monitoring is platform specific; nothing to test here.
return;
}
containerManager.start();
File scriptFile=new File(tmpDir,"scriptFile.sh");
File processStartFile=new File(tmpDir,"start_file.txt").getAbsoluteFile();
// The script writes a marker line and its own pid, then sleeps long
// enough for the monitor to observe the memory limit being exceeded.
// Close the writer in finally so the file is flushed even on error.
PrintWriter fileWriter=new PrintWriter(scriptFile);
try {
fileWriter.write("\numask 0");
fileWriter.write("\necho Hello World! > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nsleep 15");
}
finally {
fileWriter.close();
}
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
// Localize the script as the container's single resource.
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile="dest_file";
Map<String,LocalResource> localResources=new HashMap<String,LocalResource>();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
List<String> commands=new ArrayList<String>();
commands.add("/bin/bash");
commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
// 8 MB: small enough that the bash process tree exceeds it immediately.
Resource r=BuilderUtils.newResource(8 * 1024 * 1024,1);
ContainerTokenIdentifier containerIdentifier=new ContainerTokenIdentifier(cId,context.getNodeId().toString(),user,r,System.currentTimeMillis() + 120000,123,DUMMY_RM_IDENTIFIER,Priority.newInstance(0),0);
Token containerToken=BuilderUtils.newContainerToken(context.getNodeId(),containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier),containerIdentifier);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
List<StartContainerRequest> list=new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
// Wait (up to ~20s) for the script to create its start file.
int timeoutSecs=0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
Thread.sleep(1000);
LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists());
// Read the marker line and the pid written by the script. The reader was
// previously leaked; close it in finally.
BufferedReader reader=new BufferedReader(new FileReader(processStartFile));
String pid;
try {
Assert.assertEquals("Hello World!",reader.readLine());
pid=reader.readLine().trim();
Assert.assertNull(reader.readLine());
}
finally {
reader.close();
}
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE,60);
List<ContainerId> containerIds=new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,containerStatus.getExitStatus());
// The diagnostics must describe the vmem overage and include a tree dump.
String expectedMsgPattern="Container \\[pid=" + pid + ",containerID="+ cId+ "\\] is running beyond virtual memory limits. Current usage: "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "+ "Killing container.\nDump of the process-tree for "+ cId+ " :\n";
Pattern pat=Pattern.compile(expectedMsgPattern);
Assert.assertTrue("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: "+ containerStatus.getDiagnostics(),pat.matcher(containerStatus.getDiagnostics()).find());
Assert.assertFalse("Process is still alive!",exec.signalContainer(user,pid,Signal.NULL));
}
BooleanVerifier
/**
 * A freshly created state store must report itself recoverable and
 * contain no persisted state at all.
 */
@Test public void testEmptyState() throws IOException {
boolean recoverable = stateStore.canRecover();
assertTrue(recoverable);
verifyEmptyState();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies removeLocalizedResource(): removal of completed and in-progress
 * localizations for application, public, and private resources, with the
 * surviving state checked across state-store restarts.
 */
@Test public void testRemoveLocalizedResource() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// App resource: fully localized, then removed -> store becomes empty.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Same resource removed while still in progress -> also leaves no state.
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Public resource 1 (null user/app = public tracker): fully localized
// and never removed.
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(789L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
// Public resource 2: fully localized, then removed.
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
LocalizedResourceProto pubLocalizedProto2=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto2).setLocalPath(pubRsrcLocalPath2.toString()).setSize(7654321L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto2);
stateStore.removeLocalizedResource(null,null,pubRsrcLocalPath2);
// Private resource: started for the user, removed while in progress.
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
stateStore.removeLocalizedResource(user,null,privRsrcLocalPath);
restartStateStore();
// After recovery only the completed public resource 1 should remain.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getInProgressResources().isEmpty());
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
Map userResources=state.getUserResources();
assertTrue(userResources.isEmpty());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that stored applications and finished-application markers are
 * recovered across state-store restarts, and that removeApplication()
 * deletes an application's records.
 */
@Test public void testApplicationStorage() throws IOException {
// A fresh store recovers no applications.
RecoveredApplicationsState state=stateStore.loadApplicationsState();
assertTrue(state.getApplications().isEmpty());
assertTrue(state.getFinishedApplications().isEmpty());
// Store one application and check it survives a restart.
final ApplicationId appId1=ApplicationId.newInstance(1234,1);
ContainerManagerApplicationProto.Builder builder=ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl)appId1).getProto());
builder.setUser("user1");
ContainerManagerApplicationProto appProto1=builder.build();
stateStore.storeApplication(appId1,appProto1);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(1,state.getApplications().size());
assertEquals(appProto1,state.getApplications().get(0));
assertTrue(state.getFinishedApplications().isEmpty());
// Mark app1 finished and add a second application; both recover.
stateStore.storeFinishedApplication(appId1);
final ApplicationId appId2=ApplicationId.newInstance(1234,2);
builder=ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl)appId2).getProto());
builder.setUser("user2");
ContainerManagerApplicationProto appProto2=builder.build();
stateStore.storeApplication(appId2,appProto2);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(2,state.getApplications().size());
assertTrue(state.getApplications().contains(appProto1));
assertTrue(state.getApplications().contains(appProto2));
assertEquals(1,state.getFinishedApplications().size());
assertEquals(appId1,state.getFinishedApplications().get(0));
// Removing app2 (even after marking it finished) leaves only app1.
stateStore.storeFinishedApplication(appId2);
stateStore.removeApplication(appId2);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(1,state.getApplications().size());
assertEquals(appProto1,state.getApplications().get(0));
assertEquals(1,state.getFinishedApplications().size());
assertEquals(appId1,state.getFinishedApplications().get(0));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies persistence and recovery of NM token master keys (current and
 * previous) and per-application-attempt keys across state-store restarts,
 * including key rollover and removal.
 */
@Test public void testNMTokenStorage() throws IOException {
// A fresh store recovers no keys.
RecoveredNMTokensState state=stateStore.loadNMTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// The current master key survives a restart.
NMTokenSecretManagerForTest secretMgr=new NMTokenSecretManagerForTest();
MasterKey currentKey=secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// The previous master key survives alongside the current one.
MasterKey prevKey=secretMgr.generateKey();
stateStore.storeNMTokenPreviousMasterKey(prevKey);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// Two application-attempt keys are stored and recovered.
ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1);
MasterKey attemptKey1=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt1,attemptKey1);
ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,3),4);
MasterKey attemptKey2=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
Map loadedAppKeys=state.getApplicationMasterKeys();
assertEquals(2,loadedAppKeys.size());
assertEquals(attemptKey1,loadedAppKeys.get(attempt1));
assertEquals(attemptKey2,loadedAppKeys.get(attempt2));
// Roll the keys: add attempt3, remove attempt1, overwrite attempt2 with
// the old previous key, promote current -> previous, generate a new
// current key.
ApplicationAttemptId attempt3=ApplicationAttemptId.newInstance(ApplicationId.newInstance(5,6),7);
MasterKey attemptKey3=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt3,attemptKey3);
stateStore.removeNMTokenApplicationMasterKey(attempt1);
attemptKey2=prevKey;
stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2);
prevKey=currentKey;
stateStore.storeNMTokenPreviousMasterKey(prevKey);
currentKey=secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
// After restart the rolled state is visible: attempt1 gone, 2/3 present.
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
loadedAppKeys=state.getApplicationMasterKeys();
assertEquals(2,loadedAppKeys.size());
assertNull(loadedAppKeys.get(attempt1));
assertEquals(attemptKey2,loadedAppKeys.get(attempt2));
assertEquals(attemptKey3,loadedAppKeys.get(attempt3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies container state persistence through the full lifecycle:
 * REQUESTED -> LAUNCHED -> killed -> COMPLETED, with diagnostics
 * accumulating at each step, and final removal — all across state-store
 * restarts.
 */
@Test public void testContainerStorage() throws IOException {
// A fresh store recovers no containers. (Generic type restored; the raw
// List broke the rcs assignments below.)
List<RecoveredContainerState> recoveredContainers=stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
// Build a representative StartContainerRequest to persist.
ApplicationId appId=ApplicationId.newInstance(1234,3);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,4);
ContainerId containerId=ContainerId.newInstance(appAttemptId,5);
LocalResource lrsrc=LocalResource.newInstance(URL.newInstance("hdfs","somehost",12345,"/some/path/to/rsrc"),LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,123L,1234567890L);
Map<String,LocalResource> localResources=new HashMap<String,LocalResource>();
localResources.put("rsrc",lrsrc);
Map<String,String> env=new HashMap<String,String>();
env.put("somevar","someval");
List<String> containerCmds=new ArrayList<String>();
containerCmds.add("somecmd");
containerCmds.add("somearg");
Map<String,ByteBuffer> serviceData=new HashMap<String,ByteBuffer>();
serviceData.put("someservice",ByteBuffer.wrap(new byte[]{0x1,0x2,0x3}));
ByteBuffer containerTokens=ByteBuffer.wrap(new byte[]{0x7,0x8,0x9,0xa});
Map<ApplicationAccessType,String> acls=new HashMap<ApplicationAccessType,String>();
acls.put(ApplicationAccessType.VIEW_APP,"viewuser");
acls.put(ApplicationAccessType.MODIFY_APP,"moduser");
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,env,containerCmds,serviceData,containerTokens,acls);
Resource containerRsrc=Resource.newInstance(1357,3);
ContainerTokenIdentifier containerTokenId=new ContainerTokenIdentifier(containerId,"host","user",containerRsrc,9876543210L,42,2468,Priority.newInstance(7),13579);
Token containerToken=Token.newInstance(containerTokenId.getBytes(),ContainerTokenIdentifier.KIND.toString(),"password".getBytes(),"tokenservice");
StartContainerRequest containerReq=StartContainerRequest.newInstance(clc,containerToken);
// REQUESTED: stored but not yet launched.
stateStore.storeContainer(containerId,containerReq);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
RecoveredContainerState rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.REQUESTED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertFalse(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertTrue(rcs.getDiagnostics().isEmpty());
// LAUNCHED: mark launched and record some diagnostics.
StringBuilder diags=new StringBuilder();
stateStore.storeContainerLaunched(containerId);
diags.append("some diags for container");
stateStore.storeContainerDiagnostics(containerId,diags);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertFalse(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// Killed: still LAUNCHED, but the killed flag is persisted.
diags.append("some more diags for container");
stateStore.storeContainerDiagnostics(containerId,diags);
stateStore.storeContainerKilled(containerId);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// COMPLETED with exit code 21.
diags.append("some final diags");
stateStore.storeContainerDiagnostics(containerId,diags);
stateStore.storeContainerCompleted(containerId,21);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.COMPLETED,rcs.getStatus());
assertEquals(21,rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// Removal empties the store.
stateStore.removeContainer(containerId);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks version handling in the state store: a stored compatible (minor)
 * version is tolerated and reset to the default on restart, while an
 * incompatible (major) version makes the store fail to restart.
 */
@Test public void testCheckVersion() throws IOException {
Version current=stateStore.getCurrentVersion();
Assert.assertEquals(current,stateStore.loadVersion());
// Bumping only the minor version keeps the store compatible.
Version minorBump=Version.newInstance(current.getMajorVersion(),current.getMinorVersion() + 2);
stateStore.storeVersion(minorBump);
Assert.assertEquals(minorBump,stateStore.loadVersion());
restartStateStore();
Assert.assertEquals(current,stateStore.loadVersion());
// A major-version bump is incompatible and must abort the restart.
Version majorBump=Version.newInstance(current.getMajorVersion() + 1,current.getMinorVersion());
stateStore.storeVersion(majorBump);
try {
restartStateStore();
Assert.fail("Incompatible version, should expect fail here.");
}
catch ( ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for NM state:"));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that pending deletion-service tasks are persisted and recovered
 * across state-store restarts, and that removed tasks stay removed.
 */
@Test public void testDeletionTaskStorage() throws IOException {
// A fresh store recovers no deletion tasks.
RecoveredDeletionServiceState state=stateStore.loadDeletionServiceState();
assertTrue(state.getTasks().isEmpty());
// Full task: base dirs, deletion time, and successor task ids.
DeletionServiceDeleteTaskProto proto=DeletionServiceDeleteTaskProto.newBuilder().setId(7).setUser("someuser").setSubdir("some/subdir").addBasedirs("some/dir/path").addBasedirs("some/other/dir/path").setDeletionTime(123456L).addSuccessorIds(8).addSuccessorIds(9).build();
stateStore.storeDeletionTask(proto.getId(),proto);
restartStateStore();
state=stateStore.loadDeletionServiceState();
assertEquals(1,state.getTasks().size());
assertEquals(proto,state.getTasks().get(0));
// Second, minimal task; both should be recovered.
DeletionServiceDeleteTaskProto proto2=DeletionServiceDeleteTaskProto.newBuilder().setId(8).setUser("user2").setSubdir("subdir2").setDeletionTime(789L).build();
stateStore.storeDeletionTask(proto2.getId(),proto2);
restartStateStore();
state=stateStore.loadDeletionServiceState();
assertEquals(2,state.getTasks().size());
assertTrue(state.getTasks().contains(proto));
assertTrue(state.getTasks().contains(proto2));
// Remove the tasks one at a time; each removal persists across restart.
stateStore.removeDeletionTask(proto2.getId());
restartStateStore();
state=stateStore.loadDeletionServiceState();
assertEquals(1,state.getTasks().size());
assertEquals(proto,state.getTasks().get(0));
stateStore.removeDeletionTask(proto.getId());
restartStateStore();
state=stateStore.loadDeletionServiceState();
assertTrue(state.getTasks().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that in-progress (started but not yet finished) resource
 * localizations are persisted and recovered for the per-application,
 * public, and private trackers across state-store restarts.
 */
@Test public void testStartResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// Start (but do not finish) localizing an app-visibility resource.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
restartStateStore();
// Recovery: the resource appears only in the app tracker's in-progress map.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
// Generic type restored: the raw Map broke the rur assignment below.
Map<String, RecoveredUserResources> userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
// Start two public localizations (null user/app = public tracker).
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
// Start one private (user-scoped, no app) localization.
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
restartStateStore();
// Recovery: all four localizations are still in progress in the right
// trackers, none has completed.
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertEquals(2,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath1,pubts.getInProgressResources().get(pubRsrcProto1));
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertEquals(1,privts.getInProgressResources().size());
assertEquals(privRsrcLocalPath,privts.getInProgressResources().get(privRsrcProto));
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies finishResourceLocalization(): a completed localization moves
 * from the in-progress map to the localized list for the per-application,
 * public, and private trackers, and the result survives a state-store
 * restart.
 */
@Test public void testFinishResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// Start and finish localizing an app-visibility resource, then recover.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
restartStateStore();
// Recovery: the app tracker holds it as localized, nothing in progress.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
// Generic type restored: the raw Map broke the rur assignment below.
Map<String, RecoveredUserResources> userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
// Start two public and one private localization; finish only public 1
// and the private one, leaving public 2 in progress.
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(pubRsrcProto1.getSize()).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
LocalizedResourceProto privLocalizedProto=LocalizedResourceProto.newBuilder().setResource(privRsrcProto).setLocalPath(privRsrcLocalPath.toString()).setSize(privRsrcProto.getSize()).build();
stateStore.finishResourceLocalization(user,null,privLocalizedProto);
restartStateStore();
// Recovery: public 1 localized, public 2 still in progress, private
// localized, app resource unchanged.
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
assertEquals(1,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertEquals(1,privts.getLocalizedResources().size());
assertEquals(privLocalizedProto,privts.getLocalizedResources().iterator().next());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies persistence of the container-token secret state: master keys and
// active token expiration times must survive a state-store restart, and later
// updates/removals must replace the previously stored values.
@Test public void testContainerTokenStorage() throws IOException {
// A pristine store has no keys and no active tokens.
RecoveredContainerTokensState state=stateStore.loadContainerTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(new YarnConfiguration());
// Store a current master key; it must be recovered after a restart.
MasterKey currentKey=keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// Add a previous master key as well; both keys must be recovered.
MasterKey prevKey=keygen.generateKey();
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// Persist two container tokens; their expiration times must be recovered
// intact alongside the keys.
ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
Long expTime1=1234567890L;
ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
Long expTime2=9876543210L;
stateStore.storeContainerToken(cid1,expTime1);
stateStore.storeContainerToken(cid2,expTime2);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
Map loadedActiveTokens=state.getActiveTokens();
assertEquals(2,loadedActiveTokens.size());
assertEquals(expTime1,loadedActiveTokens.get(cid1));
assertEquals(expTime2,loadedActiveTokens.get(cid2));
// Mutate the stored state: add a token, remove one, overwrite another's
// expiration, and roll the master keys; recovery must reflect all of it.
ContainerId cid3=BuilderUtils.newContainerId(3,3,3,3);
Long expTime3=135798642L;
stateStore.storeContainerToken(cid3,expTime3);
stateStore.removeContainerToken(cid1);
expTime2+=246897531L;
stateStore.storeContainerToken(cid2,expTime2);
prevKey=currentKey;
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
currentKey=keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
loadedActiveTokens=state.getActiveTokens();
assertEquals(2,loadedActiveTokens.size());
assertNull(loadedActiveTokens.get(cid1));
assertEquals(expTime2,loadedActiveTokens.get(cid2));
assertEquals(expTime3,loadedActiveTokens.get(cid3));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies NMContainerTokenSecretManager recovery: container tokens created
// before a "restart" (a fresh secret manager over the same state store) are
// still honored; a token already used to start a container is no longer a
// valid start request; tokens two key-rolls old fail password retrieval.
@Test public void testRecovery() throws IOException {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
final NodeId nodeId=NodeId.newInstance("somehost",1234);
final ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
final ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(conf);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
NMContainerTokenSecretManager secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
// Issue two container tokens under the current master key.
ContainerTokenIdentifier tokenId1=createContainerTokenId(cid1,nodeId,"user1",secretMgr);
ContainerTokenIdentifier tokenId2=createContainerTokenId(cid2,nodeId,"user2",secretMgr);
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Simulate an NM restart: a recovered secret manager must accept both
// tokens again.
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Start a container with token2 and roll the key, then restart: token2 may
// no longer start a container, but passwords for tokens under the (now
// previous) key are still retrievable.
secretMgr.startContainerSuccessful(tokenId2);
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Roll the key a second time and restart: both original tokens are now two
// keys old, so password retrieval must fail with InvalidToken.
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
try {
secretMgr.retrievePassword(tokenId1);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: the token's master key has been retired
}
try {
secretMgr.retrievePassword(tokenId2);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: the token's master key has been retired
}
stateStore.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies NMTokenSecretManagerInNM recovery: per-attempt NM token keys and
// passwords survive a "restart" (a fresh secret manager over the same state
// store); appFinished() removes an attempt's key so its token stops being
// retrievable after the next recovery.
@Test public void testRecovery() throws IOException {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
final NodeId nodeId=NodeId.newInstance("somehost",1234);
final ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1);
final ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,2),2);
NMTokenKeyGeneratorForTest keygen=new NMTokenKeyGeneratorForTest();
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
NMTokenSecretManagerInNM secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
// Create NM tokens for two attempts and record their container starts.
NMTokenIdentifier attemptToken1=getNMTokenId(secretMgr.createNMToken(attempt1,nodeId,"user1"));
NMTokenIdentifier attemptToken2=getNMTokenId(secretMgr.createNMToken(attempt2,nodeId,"user2"));
secretMgr.appAttemptStartContainer(attemptToken1);
secretMgr.appAttemptStartContainer(attemptToken2);
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// Simulate an NM restart: both attempts' keys and tokens must be recovered.
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// Roll the master key and finish app1, then restart: attempt1's key is
// gone but its token still resolves (grace behavior); attempt2 unchanged.
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr.appFinished(attempt1.getApplicationId());
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// Roll the key again and restart: attempt1's token is now invalid while
// attempt2's (key still present) remains retrievable.
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected: app1 finished and its key was rolled away
}
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// Finish app2 as well and restart: neither token may resolve any more.
secretMgr.appFinished(attempt2.getApplicationId());
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected
}
try {
secretMgr.retrievePassword(attemptToken2);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected
}
stateStore.close();
}
InternalCallVerifier BooleanVerifier
// Verifies CgroupsLCEResourcesHandler.deleteCgroup(): an existing path is
// deleted successfully, while a missing path makes the handler retry until
// the (mock) clock passes the delete timeout, after which it reports failure.
@Test public void testDeleteCgroup() throws Exception {
  final MockClock clock=new MockClock();
  clock.time=System.currentTimeMillis();
  CgroupsLCEResourcesHandler handler=new CgroupsLCEResourcesHandler();
  handler.setConf(new YarnConfiguration());
  handler.initConfig();
  handler.clock=clock;
  // Create an empty file to stand in for an existing cgroup entry.
  File file=new File("target",UUID.randomUUID().toString());
  // try-with-resources instead of the bare new FileOutputStream(file).close():
  // the stream is closed even if construction/close interacts badly.
  try (FileOutputStream fos=new FileOutputStream(file)) {
  }
  Assert.assertTrue(handler.deleteCgroup(file.getPath()));
  // Background thread advances the mock clock past the delete timeout once
  // the main thread has (via the latch) started the failing delete attempt.
  final CountDownLatch latch=new CountDownLatch(1);
  new Thread(){
    @Override public void run(){
      latch.countDown();
      try {
        Thread.sleep(200);
      }
      catch ( InterruptedException ex) {
        // Restore the interrupt status rather than swallowing it.
        Thread.currentThread().interrupt();
      }
      clock.time+=YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT;
    }
  }
  .start();
  latch.await();
  // This path was never created, so deletion must time out and fail.
  file=new File("target",UUID.randomUUID().toString());
  Assert.assertFalse(handler.deleteCgroup(file.getPath()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies ContainerLogsUtils.getContainerLogDirs() returns plain local
// paths (no "file:" scheme prefix) both while the container is present in
// the NM context and after it has been removed (finished-container case),
// even when the configured NM log dir is given in URI form.
@Test(timeout=30000) public void testContainerLogDirs() throws IOException, YarnException {
File absLogDir=new File("target",TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
// Deliberately configure the log dir as a "file:///..." URI string.
String logdirwithFile=absLogDir.toURI().toString();
Configuration conf=new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS,logdirwithFile);
NodeHealthCheckerService healthChecker=new NodeHealthCheckerService();
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler=healthChecker.getDiskHandler();
NMContext nmContext=new NodeManager.NMContext(null,null,dirsHandler,new ApplicationACLsManager(conf),new NMNullStateStoreService());
RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(conf);
String user="nobody";
long clusterTimeStamp=1234;
// Register a mock application and a RUNNING mock container in the context.
ApplicationId appId=BuilderUtils.newApplicationId(recordFactory,clusterTimeStamp,1);
Application app=mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
ContainerId container1=BuilderUtils.newContainerId(recordFactory,appId,appAttemptId,0);
nmContext.getApplications().put(appId,app);
MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),conf,user,appId,1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(container1,container);
// Running container: the returned log dir must not carry a URI scheme.
List files=null;
files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
Assert.assertTrue(!(files.get(0).toString().contains("file:")));
// Remove the container (simulating completion); lookup must still succeed
// and still return a plain path.
nmContext.getContainers().remove(container1);
Assert.assertNull(nmContext.getContainers().get(container1));
files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
Assert.assertTrue(!(files.get(0).toString().contains("file:")));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies the NM web-services container-log endpoint:
//  - serves the log file's contents for a running container,
//  - returns 404 with an explanatory message for a missing log file,
//  - still serves the log after the container is removed from the context.
@Test public void testContainerLogs() throws IOException {
  WebResource r=resource();
  final ContainerId containerId=BuilderUtils.newContainerId(0,0,0,0);
  final String containerIdStr=BuilderUtils.newContainerId(0,0,0,0).toString();
  final ApplicationAttemptId appAttemptId=containerId.getApplicationAttemptId();
  final ApplicationId appId=appAttemptId.getApplicationId();
  final String appIdStr=appId.toString();
  final String filename="logfile1";
  final String logMessage="log message\n";
  nmContext.getApplications().put(appId,new ApplicationImpl(null,"user",appId,null,nmContext));
  MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),new Configuration(),"user",appId,1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId,container);
  // Write a small log file where the web service expects to find it.
  Path path=dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr,containerIdStr) + "/" + filename,false);
  File logFile=new File(path.toUri().getPath());
  logFile.deleteOnExit();
  assertTrue("Failed to create log dir",logFile.getParentFile().mkdirs());
  // try-with-resources guarantees the writer is closed (and content flushed)
  // even if print() throws; the original closed it manually.
  try (PrintWriter pw=new PrintWriter(logFile)) {
    pw.print(logMessage);
  }
  // Running container: the endpoint returns the file's contents verbatim.
  ClientResponse response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  String responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
  // Unknown log file name: 404 plus a human-readable explanation.
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path("uhhh").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  Assert.assertEquals(Status.NOT_FOUND.getStatusCode(),response.getStatus());
  responseText=response.getEntity(String.class);
  assertTrue(responseText.contains("Cannot find this log on the local disk."));
  // Finished container (removed from context): log must still be served.
  nmContext.getContainers().remove(containerId);
  Assert.assertNull(nmContext.getContainers().get(containerId));
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAuthorizedAccess() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
rm=new MockRMWithAMS(conf,containerManager);
rm.start();
MockNM nm1=rm.registerNode("localhost:1234",5120);
Map acls=new HashMap(2);
acls.put(ApplicationAccessType.VIEW_APP,"*");
RMApp app=rm.submitApp(1024,"appname","appuser",acls);
nm1.nodeHeartbeat(true);
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
waitForLaunchedState(attempt);
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,rm.getApplicationMasterService().getBindAddress(),conf);
}
}
);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
RegisterApplicationMasterResponse response=client.registerApplicationMaster(request);
Assert.assertNotNull(response.getClientToAMTokenMasterKey());
if (UserGroupInformation.isSecurityEnabled()) {
Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
}
Assert.assertEquals("Register response has bad ACLs","*",response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
}
BranchVerifier UtilityVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies that registering an AM WITHOUT a valid AMRMToken is rejected by
// the ApplicationMasterService with an AccessControlException whose message
// depends on whether security (kerberos) is enabled.
@Test public void testUnauthorizedAccess() throws Exception {
  MyContainerManager containerManager=new MyContainerManager();
  rm=new MockRMWithAMS(conf,containerManager);
  rm.start();
  MockNM nm1=rm.registerNode("localhost:1234",5120);
  RMApp app=rm.submitApp(1024);
  nm1.nodeHeartbeat(true);
  // Wait (up to ~40s) for the mock container manager to observe the AM launch.
  int waitCount=0;
  while (containerManager.containerTokens == null && waitCount++ < 40) {
    LOG.info("Waiting for AM Launch to happen..");
    Thread.sleep(1000);
  }
  Assert.assertNotNull(containerManager.containerTokens);
  RMAppAttempt attempt=app.getCurrentAppAttempt();
  ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
  waitForLaunchedState(attempt);
  final Configuration conf=rm.getConfig();
  final YarnRPC rpc=YarnRPC.create(conf);
  final InetSocketAddress serviceAddr=conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
  // Note: no AMRMToken is added to this UGI, so the register call below
  // must fail authorization.
  UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
  // Parameterize PrivilegedAction so doAs() returns the proxy type directly
  // (a raw PrivilegedAction would make doAs return Object).
  ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>(){
    @Override public ApplicationMasterProtocol run(){
      return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,serviceAddr,conf);
    }
  }
  );
  RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
  try {
    client.registerApplicationMaster(request);
    Assert.fail("Should fail with authorization error");
  }
  catch ( Exception e) {
    if (isCause(AccessControlException.class,e)) {
      // The expected failure text differs between secure and simple auth.
      String expectedMessage="";
      if (UserGroupInformation.isSecurityEnabled()) {
        expectedMessage="Client cannot authenticate via:[TOKEN]";
      }
      else {
        expectedMessage="SIMPLE authentication is not enabled. Available:[TOKEN]";
      }
      Assert.assertTrue(e.getCause().getMessage().contains(expectedMessage));
    }
    else {
      // Anything other than an access-control failure is a real error.
      throw e;
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Submitting an application whose id already exists in the RM must be
// rejected with a "duplicate" YarnException, and the pre-existing
// application must remain registered and unchanged.
@Test(timeout=30000) public void testRMAppSubmitDuplicateApplicationId() throws Exception {
  ApplicationId appId=MockApps.newAppID(0);
  asContext.setApplicationId(appId);
  RMApp appOrig=rmContext.getRMApps().get(appId);
  // Fixed: the original used reference comparison ("testApp1" != name),
  // which is vacuously true for distinct String objects; use equals() to
  // actually compare contents.
  Assert.assertFalse("app name matches but shouldn't","testApp1".equals(appOrig.getName()));
  try {
    appMonitor.submitApplication(asContext,"test");
    Assert.fail("Exception is expected when applicationId is duplicate.");
  }
  catch ( YarnException e) {
    // (typo "expectd" fixed in the assertion message)
    Assert.assertTrue("The thrown exception is not the expected one.",e.getMessage().contains("Cannot add a duplicate!"));
  }
  // The original application must still be registered with its old state.
  RMApp app=rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null",app);
  Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
  Assert.assertEquals("app state doesn't match",RMAppState.FINISHED,app.getState());
}
InternalCallVerifier BooleanVerifier
// App summaries are logged on a single line: any newlines or carriage
// returns in the app name, user, or queue must be escaped before logging.
@Test(timeout=30000) public void testEscapeApplicationSummary(){
  RMApp mockApp=mock(RMAppImpl.class);
  when(mockApp.getApplicationId()).thenReturn(ApplicationId.newInstance(100L,1));
  when(mockApp.getName()).thenReturn("Multiline\n\n\r\rAppName");
  when(mockApp.getUser()).thenReturn("Multiline\n\n\r\rUserName");
  when(mockApp.getQueue()).thenReturn("Multiline\n\n\r\rQueueName");
  when(mockApp.getState()).thenReturn(RMAppState.RUNNING);
  RMAppManager.ApplicationSummary.SummaryBuilder builder=new RMAppManager.ApplicationSummary().createAppSummary(mockApp);
  String summaryText=builder.toString();
  LOG.info("summary: " + summaryText);
  // No raw line breaks may survive in the rendered summary...
  Assert.assertFalse(summaryText.contains("\n"));
  Assert.assertFalse(summaryText.contains("\r"));
  // ...each multiline field instead appears with its breaks escaped.
  String escapedBreaks="\\n\\n\\r\\r";
  Assert.assertTrue(summaryText.contains("Multiline" + escapedBreaks + "AppName"));
  Assert.assertTrue(summaryText.contains("Multiline" + escapedBreaks + "UserName"));
  Assert.assertTrue(summaryText.contains("Multiline" + escapedBreaks + "QueueName"));
}
UtilityVerifier BooleanVerifier HybridVerifier
// An application whose resource demand exceeds the scheduler maximum must be
// rejected at submission with an invalid-resource-request error.
@Test(timeout=30000) public void testRMAppSubmitInvalidResourceRequest() throws Exception {
  asContext.setResource(Resources.createResource(YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1));
  YarnException rejection=null;
  try {
    appMonitor.submitApplication(asContext,"test");
  }
  catch ( YarnException e) {
    rejection=e;
  }
  if (rejection == null) {
    Assert.fail("Application submission should fail because resource" + " request is invalid.");
  }
  Assert.assertTrue("The thrown exception is not" + " InvalidResourceRequestException",rejection.getMessage().contains("Invalid resource request"));
}
TestInitializer BooleanVerifier HybridVerifier
// Per-test setup: verbose root logging, a YarnConfiguration wired into UGI,
// and RM recovery enabled against the in-memory state store.
@Before public void setup() throws UnknownHostException {
  LogManager.getRootLogger().setLevel(Level.DEBUG);
  conf=new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RECOVERY_ENABLED,"true");
  conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
  // These tests rely on the default allowing more than one AM attempt.
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that the custom AM launcher hands the expected metadata (attempt
// id, submit time, master container id, NM host, max attempts) to the
// container manager at launch, and that AM cleanup runs after the attempt
// finishes.
@Test public void testAMLaunchAndCleanup() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MyContainerManagerImpl containerManager=new MyContainerManagerImpl();
MockRMWithCustomAMLauncher rm=new MockRMWithCustomAMLauncher(containerManager);
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",5120);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
// Wait (up to ~20s) for the AM container launch to reach the mock CM.
int waitCount=0;
while (containerManager.launched == false && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertTrue(containerManager.launched);
// Launch-time metadata the launcher passed to the container manager must
// match what the RM knows about the attempt.
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId appAttemptId=attempt.getAppAttemptId();
Assert.assertEquals(appAttemptId.toString(),containerManager.attemptIdAtContainerManager);
Assert.assertEquals(app.getSubmitTime(),containerManager.submitTimeAtContainerManager);
Assert.assertEquals(app.getRMAppAttempt(appAttemptId).getMasterContainer().getId().toString(),containerManager.containerIdAtContainerManager);
Assert.assertEquals(nm1.getNodeId().toString(),containerManager.nmHostAtContainerManager);
Assert.assertEquals(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS,containerManager.maxAppAttempts);
// Drive the attempt to FINISHED so the launcher performs cleanup.
MockAM am=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),appAttemptId);
am.registerAppAttempt();
am.unregisterAppAttempt();
nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
// Wait (up to ~20s) for the AM container cleanup to reach the mock CM.
waitCount=0;
while (containerManager.cleanedup == false && waitCount++ < 20) {
LOG.info("Waiting for AM Cleanup to happen..");
Thread.sleep(1000);
}
Assert.assertTrue(containerManager.cleanedup);
am.waitForState(RMAppAttemptState.FINISHED);
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An allocate() before registerApplicationMaster() must be answered with
// AM_RESYNC; registering twice must be rejected with a descriptive message;
// an allocate() after the attempt FINISHED must be answered with AM_SHUTDOWN.
@SuppressWarnings("unused") @Test(timeout=100000) public void testallocateBeforeAMRegistration() throws Exception {
  Logger rootLogger=LogManager.getRootLogger();
  boolean thrown=false;
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm=new MockRM();
  rm.start();
  MockNM nm1=rm.registerNode("h1:1234",5000);
  RMApp app=rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt=app.getCurrentAppAttempt();
  MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
  // First allocate before registration: RM must ask the AM to resync.
  int request=2;
  AllocateResponse ar=am.allocate("h1",1000,request,new ArrayList());
  Assert.assertTrue(ar.getAMCommand() == AMCommand.AM_RESYNC);
  nm1.nodeHeartbeat(true);
  AllocateResponse amrs=am.allocate(new ArrayList(),new ArrayList());
  // Fixed: this assertion previously re-checked 'ar' (a copy-paste slip),
  // leaving the second allocate's response 'amrs' unverified.
  Assert.assertTrue(amrs.getAMCommand() == AMCommand.AM_RESYNC);
  am.registerAppAttempt();
  // A second registration for the same attempt must be rejected.
  thrown=false;
  try {
    am.registerAppAttempt(false);
  }
  catch ( Exception e) {
    Assert.assertEquals("Application Master is already registered : " + attempt.getAppAttemptId().getApplicationId(),e.getMessage());
    thrown=true;
  }
  Assert.assertTrue(thrown);
  // After the attempt finishes, a further allocate must be told to shut down.
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);
  AllocateResponse amrs2=am.allocate(new ArrayList(),new ArrayList());
  Assert.assertTrue(amrs2.getAMCommand() == AMCommand.AM_SHUTDOWN);
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
// An AM that calls finishApplicationMaster before ever registering must get
// an ApplicationMasterNotRegisteredException; after registering, the very
// same unregister request succeeds.
@Test(timeout=1200000) public void testFinishApplicationMasterBeforeRegistering() throws Exception {
  MockRM resourceManager=new MockRM(conf);
  try {
    resourceManager.start();
    MockNM nodeManager=resourceManager.registerNode("127.0.0.1:1234",6 * GB);
    RMApp application=resourceManager.submitApp(2048);
    MockAM applicationMaster=MockRM.launchAM(application,resourceManager,nodeManager);
    FinishApplicationMasterRequest finishRequest=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.FAILED,"","");
    // Unregistering before registration surfaces a wrapped
    // ApplicationMasterNotRegisteredException with an explanatory message.
    Throwable failure=null;
    try {
      applicationMaster.unregisterAppAttempt(finishRequest,false);
    }
    catch ( Exception e) {
      failure=e.getCause();
    }
    Assert.assertNotNull(failure);
    Assert.assertTrue(failure instanceof ApplicationMasterNotRegisteredException);
    Assert.assertNotNull(failure.getMessage());
    Assert.assertTrue(failure.getMessage().contains("Application Master is trying to unregister before registering for:"));
    // Once registered, the identical unregister request is accepted.
    applicationMaster.registerAppAttempt();
    applicationMaster.unregisterAppAttempt(finishRequest,false);
  }
  finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Releasing a container that belongs to another application attempt must be
// rejected with InvalidContainerReleaseException naming both the container
// and the offending attempt.
@Test(timeout=600000) public void testInvalidContainerReleaseRequest() throws Exception {
MockRM rm=new MockRM(conf);
try {
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB);
// App 1: register an AM and wait until it holds at least one container.
RMApp app1=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[]{"127.0.0.1"},GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
sleep(1000);
alloc1Response=am1.schedule();
}
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() > 0);
// App 2: a second AM attempts to release app 1's container.
RMApp app2=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt2=app2.getCurrentAppAttempt();
MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
ContainerId cId=alloc1Response.getAllocatedContainers().get(0).getId();
am2.addContainerToBeReleased(cId);
try {
am2.schedule();
Assert.fail("Exception was expected!!");
}
catch ( InvalidContainerReleaseException e) {
// The error message must identify both the container and the attempt.
StringBuilder sb=new StringBuilder("Cannot release container : ");
sb.append(cId.toString());
sb.append(" not belonging to this application attempt : ");
sb.append(attempt2.getAppAttemptId().toString());
Assert.assertTrue(e.getMessage().contains(sb.toString()));
}
}
finally {
if (rm != null) {
rm.stop();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies forceKillApplication: for an app with a managed AM, the kill is
// asynchronous and getIsKillCompleted() only turns true after repeated
// requests; for an unmanaged AM (submitApp(..., true)) the first response
// already acknowledges completion. Both apps must end up in the KILLED
// application listing.
@Test public void testForceKillApplication() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
MockRM rm=new MockRM();
rm.init(conf);
rm.start();
ClientRMService rmService=rm.getClientRMService();
GetApplicationsRequest getRequest=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED));
RMApp app1=rm.submitApp(1024);
RMApp app2=rm.submitApp(1024,true);
assertEquals("Incorrect number of apps in the RM",0,rmService.getApplications(getRequest).getApplicationList().size());
KillApplicationRequest killRequest1=KillApplicationRequest.newInstance(app1.getApplicationId());
KillApplicationRequest killRequest2=KillApplicationRequest.newInstance(app2.getApplicationId());
// Managed AM: keep issuing kill requests (up to ~1s total) until the RM
// reports completion; this is expected to take more than one attempt.
int killAttemptCount=0;
for (int i=0; i < 100; i++) {
KillApplicationResponse killResponse1=rmService.forceKillApplication(killRequest1);
killAttemptCount++;
if (killResponse1.getIsKillCompleted()) {
break;
}
Thread.sleep(10);
}
assertTrue("Kill attempt count should be greater than 1 for managed AMs",killAttemptCount > 1);
assertEquals("Incorrect number of apps in the RM",1,rmService.getApplications(getRequest).getApplicationList().size());
// Unmanaged AM: the very first kill response reports completion.
KillApplicationResponse killResponse2=rmService.forceKillApplication(killRequest2);
assertTrue("Killing UnmanagedAM should falsely acknowledge true",killResponse2.getIsKillCompleted());
// Wait (up to ~1s) for both apps to appear in the KILLED listing.
for (int i=0; i < 100; i++) {
if (2 == rmService.getApplications(getRequest).getApplicationList().size()) {
break;
}
Thread.sleep(10);
}
assertEquals("Incorrect number of apps in the RM",2,rmService.getApplications(getRequest).getApplicationList().size());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renewing a delegation token as a user other than the designated renewer
// must fail with a YarnException naming both the caller and the renewer.
@Test public void testTokenRenewalWrongUser() throws Exception {
  boolean renewalFailed=false;
  try {
    owner.doAs(new PrivilegedExceptionAction<Void>(){
      @Override public Void run() throws Exception {
        try {
          checkTokenRenewal(owner,other);
          return null;
        }
        catch ( YarnException ex) {
          // The rejection must identify who attempted the renewal and who
          // the legitimate renewer is.
          Assert.assertTrue(ex.getMessage().contains(owner.getUserName() + " tries to renew a token with renewer " + other.getUserName()));
          throw ex;
        }
      }
    }
    );
  }
  catch ( Exception e) {
    // Expected path: the renewal attempt was rejected.
    renewalFailed=true;
  }
  if (!renewalFailed) {
    Assert.fail("renew should have failed");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Verifies that a third party can never cancel someone else's delegation
// token: first for a kerberos-authenticated caller (testerKerb), then for a
// SIMPLE-auth caller (tester). Every owner/renewer combination must be
// rejected with an authorization error that names the caller.
@Test public void testTokenCancellationByWrongUser(){
RMContext rmContext=mock(RMContext.class);
final ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,dtsm);
UserGroupInformation[] kerbTestOwners={owner,other,tester,ownerKerb,otherKerb};
UserGroupInformation[] kerbTestRenewers={owner,other,ownerKerb,otherKerb};
// Kerberos caller: every owner/renewer pairing must be refused.
for ( final UserGroupInformation tokOwner : kerbTestOwners) {
for ( final UserGroupInformation tokRenewer : kerbTestRenewers) {
try {
testerKerb.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
checkTokenCancellation(rmService,tokOwner,tokRenewer);
Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName());
return null;
}
catch ( YarnException e) {
// Expected: caller is not authorized to cancel this token.
Assert.assertTrue(e.getMessage().contains(testerKerb.getUserName() + " is not authorized to cancel the token"));
return null;
}
}
}
);
}
catch ( Exception e) {
Assert.fail("Unexpected exception; " + e.getMessage());
}
}
}
UserGroupInformation[] simpleTestOwners={owner,other,ownerKerb,otherKerb,testerKerb};
UserGroupInformation[] simpleTestRenewers={owner,other,ownerKerb,otherKerb};
// SIMPLE-auth caller: same expectation for every pairing.
for ( final UserGroupInformation tokOwner : simpleTestOwners) {
for ( final UserGroupInformation tokRenewer : simpleTestRenewers) {
try {
tester.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
checkTokenCancellation(tokOwner,tokRenewer);
Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName());
return null;
}
catch ( YarnException ex) {
// Expected: caller is not authorized to cancel this token.
Assert.assertTrue(ex.getMessage().contains(tester.getUserName() + " is not authorized to cancel the token"));
return null;
}
}
}
);
}
catch ( Exception e) {
Assert.fail("Unexpected exception; " + e.getMessage());
}
}
}
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end RM delegation token lifecycle: obtain a token as a
 * Kerberos-authenticated user, use it over RPC, renew it before expiry,
 * let the renewed interval elapse so the token expires, then fetch a fresh
 * token and cancel it — verifying RPC access is rejected once the token is
 * expired or cancelled. Timing-sensitive: relies on short intervals below.
 */
@Test public void testDelegationToken() throws IOException, InterruptedException {
final YarnConfiguration conf=new YarnConfiguration();
conf.set(YarnConfiguration.RM_PRINCIPAL,"testuser/localhost@apache.org");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
ResourceScheduler scheduler=createMockScheduler(conf);
// Short windows (10s expiry, 20s max lifetime, 10s renew) so both renewal
// and expiry can be exercised within the test run.
long initialInterval=10000l;
long maxLifetime=20000l;
long renewInterval=10000l;
RMDelegationTokenSecretManager rmDtSecretManager=createRMDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval);
rmDtSecretManager.startThreads();
LOG.info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: "+ maxLifetime+ ", renewInterval: "+ renewInterval);
final ClientRMService clientRMService=new ClientRMServiceForTest(conf,scheduler,rmDtSecretManager);
clientRMService.init(conf);
clientRMService.start();
ApplicationClientProtocol clientRMWithDT=null;
try {
// Fetch a token as a (simulated) Kerberos-authenticated user.
UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG");
Assert.assertEquals("testrenewer",loggedInUser.getShortUserName());
loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
org.apache.hadoop.yarn.api.records.Token token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName());
long tokenFetchTime=System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// The fresh token must authenticate an RPC client immediately.
clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser1",conf);
GetNewApplicationRequest request=Records.newRecord(GetNewApplicationRequest.class);
try {
clientRMWithDT.getNewApplication(request);
}
catch ( IOException e) {
fail("Unexpected exception" + e);
}
catch ( YarnException e) {
fail("Unexpected exception" + e);
}
// Wait until roughly halfway through the initial expiry interval, then renew.
while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) {
Thread.sleep(500l);
}
long nextExpTime=renewDelegationToken(loggedInUser,clientRMService,token);
long renewalTime=System.currentTimeMillis();
LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime);
// Wait until the original expiry has passed but the renewed expiry has not;
// the renewed token must still work.
while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) {
Thread.sleep(500l);
}
Thread.sleep(50l);
try {
clientRMWithDT.getNewApplication(request);
}
catch ( IOException e) {
fail("Unexpected exception" + e);
}
catch ( YarnException e) {
fail("Unexpected exception" + e);
}
// Let the renewed interval elapse; the token is now expired and the RPC
// call must fail with InvalidToken mentioning "is expired".
while (System.currentTimeMillis() < renewalTime + renewInterval) {
Thread.sleep(500l);
}
Thread.sleep(50l);
LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid");
try {
clientRMWithDT.getNewApplication(request);
fail("Should not have succeeded with an expired token");
}
catch ( Exception e) {
assertEquals(InvalidToken.class.getName(),e.getClass().getName());
assertTrue(e.getMessage().contains("is expired"));
}
// Drop the proxy built on the expired token before fetching a new one.
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
clientRMWithDT=null;
}
token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName());
tokenFetchTime=System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf);
request=Records.newRecord(GetNewApplicationRequest.class);
try {
clientRMWithDT.getNewApplication(request);
}
catch ( IOException e) {
fail("Unexpected exception" + e);
}
catch ( YarnException e) {
fail("Unexpected exception" + e);
}
// Cancel the fresh token, rebuild the proxy on it, and verify the
// cancelled token is rejected (either exception type is acceptable).
cancelDelegationToken(loggedInUser,clientRMService,token);
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
clientRMWithDT=null;
}
clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf);
LOG.info("Cancelled delegation token at: " + System.currentTimeMillis());
try {
clientRMWithDT.getNewApplication(request);
fail("Should not have succeeded with a cancelled delegation token");
}
catch ( IOException e) {
// expected: cancelled token rejected at the RPC layer
}
catch ( YarnException e) {
// expected: cancelled token rejected by the service
}
}
finally {
// Always stop secret-manager threads and release the RPC proxy.
rmDtSecretManager.stopThreads();
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
}
}
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Service init must reject a scheduler configuration whose minimum memory
 * allocation exceeds the maximum memory allocation.
 */
@Test(timeout=30000)
public void testConfValidation() throws Exception {
  FifoScheduler fifoScheduler = new FifoScheduler();
  Configuration invalidConf = new YarnConfiguration();
  // Deliberately inverted bounds: min (2048) > max (1024).
  invalidConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  invalidConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  try {
    fifoScheduler.serviceInit(invalidConf);
    fail("Exception is expected because the min memory allocation is"
        + " larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler memory"));
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Submits an app and triggers RM failover while the app is still in the
 * NEW state; the app was never persisted, so killing it on the newly
 * active RM must fail with ApplicationNotFoundException.
 */
@Test(timeout=20000) public void testKillAppWhenFailoverHappensAtNewState() throws Exception {
startRMsWithCustomizedRMAppManager();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// NOTE(review): the trailing boolean args appear to keep the app from
// progressing past NEW before the failover below — confirm against the
// submitApp overload used by this MockRM.
RMApp app0=rm1.submitApp(200,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null,null,false,false);
try {
failOverAndKillApp(app0.getApplicationId(),RMAppState.NEW);
fail("Should get an exception here");
}
catch ( ApplicationNotFoundException ex) {
// The standby-turned-active RM has no record of the NEW app.
Assert.assertTrue(ex.getMessage().contains("Trying to kill an absent application " + app0.getApplicationId()));
}
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the AM host/RPC port reported for an application are
 * invalidated (host "N/A", port -1) once the app fails or is killed, while
 * a successfully finished app keeps its real AM host/port.
 */
@Test(timeout=80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  // app1: runs and finishes normally.
  RMApp app1 = rm1.submitApp(200);
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
  // app2: AM container completes unexpectedly -> attempt FAILED -> app
  // FAILED (max attempts is 1).
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am2.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);
  // app3: killed by the client.
  RMApp app3 = rm1.submitApp(200);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  rm1.killApp(app3.getApplicationId());
  rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);
  GetApplicationsRequest request1 = GetApplicationsRequest.newInstance(
      EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
          YarnApplicationState.FAILED));
  GetApplicationsResponse response1 = rm1.getClientRMService().getApplications(request1);
  // Typed list (was raw): a raw List cannot drive the enhanced for-loop
  // over ApplicationReport below.
  List<ApplicationReport> appList1 = response1.getApplicationList();
  Assert.assertEquals(3, appList1.size());
  for (ApplicationReport report : appList1) {
    // Failed/killed apps must not expose a stale AM host/port.
    if (report.getApplicationId().equals(app2.getApplicationId())
        || report.getApplicationId().equals(app3.getApplicationId())) {
      Assert.assertEquals("N/A", report.getHost());
      Assert.assertEquals(-1, report.getRpcPort());
    }
    // The cleanly finished app keeps its real AM host/port.
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertFalse(report.getHost().equals("N/A"));
      Assert.assertTrue(report.getRpcPort() != -1);
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that NM tokens are issued for regular (non-AM) containers: the
 * first container id is burned so the AM container gets an id != 1, no NM
 * token is recorded for the AM container itself, and the token arrives with
 * the first normal container allocated on nm1.
 */
@Test(timeout=20000)
public void testNMTokenSentForNormalContainer() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  RMApp app = rm.submitApp(2000);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  // Consume container id 1 so the AM container's id is not 1.
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId();
  nm1.nodeHeartbeat(true);
  MockAM am = MockRM.launchAM(app, rm, nm1);
  Assert.assertTrue(attempt.getMasterContainer().getId().getId() != 1);
  // No NM token should have been recorded for the AM container itself.
  Assert.assertFalse(rm.getRMContext().getNMTokenSecretManager()
      .isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(), nm1.getNodeId()));
  am.registerAppAttempt();
  rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
  int NUM_CONTAINERS = 1;
  // Typed lists (were raw): getNodeId() on the first NMToken below does not
  // compile against a raw List.
  List<Container> containers = new ArrayList<Container>();
  List<NMToken> expectedNMTokens = new ArrayList<NMToken>();
  // Poll allocate+heartbeat until the single requested container shows up.
  while (true) {
    AllocateResponse response =
        am.allocate("127.0.0.1", 2000, NUM_CONTAINERS, new ArrayList<ContainerId>());
    nm1.nodeHeartbeat(true);
    containers.addAll(response.getAllocatedContainers());
    expectedNMTokens.addAll(response.getNMTokens());
    if (containers.size() == NUM_CONTAINERS) {
      break;
    }
    Thread.sleep(200);
    System.out.println("Waiting for container to be allocated.");
  }
  // The NM token accompanying the normal container must be for nm1.
  NodeId nodeId = expectedNMTokens.get(0).getNodeId();
  Assert.assertEquals(nm1.getNodeId(), nodeId);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises NM token issuance and invalidation across container
 * allocations on two NMs: tokens are issued once per (attempt, NM),
 * cleared when an NM re-registers, wiped by a master-key rollover, and the
 * attempt is deregistered from the token secret manager when the AM
 * finishes.
 */
@Test(timeout=40000)
public void testNMToken() throws Exception {
  MockRM rm = new MockRM();
  try {
    rm.start();
    MockNM nm1 = rm.registerNode("h1:1234", 10000);
    NMTokenSecretManagerInRM nmTokenSecretManager =
        rm.getRMContext().getNMTokenSecretManager();
    RMApp app = rm.submitApp(1000);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
    Assert.assertTrue(
        nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
    am.registerAppAttempt();
    // Typed collections (were raw): the enhanced for-loops over Container
    // and the RMNode lookup below do not compile against raw types.
    // NOTE(review): Token here is org.apache.hadoop.yarn.api.records.Token
    // — confirm the file's import resolves to that type.
    ArrayList<Container> containersReceivedForNM1 = new ArrayList<Container>();
    List<ContainerId> releaseContainerList = new ArrayList<ContainerId>();
    HashMap<String, Token> nmTokens = new HashMap<String, Token>();
    AllocateResponse response = am.allocate("h1", 1000, 2, releaseContainerList);
    Assert.assertEquals(0, response.getAllocatedContainers().size());
    allocateContainersAndValidateNMTokens(am, containersReceivedForNM1, 2, nmTokens, nm1);
    // Only one NM token per node, even across repeated allocations.
    Assert.assertEquals(1, nmTokens.size());
    response = am.allocate("h1", 1000, 2, releaseContainerList);
    Assert.assertEquals(0, response.getAllocatedContainers().size());
    allocateContainersAndValidateNMTokens(am, containersReceivedForNM1, 4, nmTokens, nm1);
    Assert.assertEquals(1, nmTokens.size());
    MockNM nm2 = rm.registerNode("h2:1234", 10000);
    nm2.nodeHeartbeat(true);
    ArrayList<Container> containersReceivedForNM2 = new ArrayList<Container>();
    response = am.allocate("h2", 1000, 2, releaseContainerList);
    Assert.assertEquals(0, response.getAllocatedContainers().size());
    allocateContainersAndValidateNMTokens(am, containersReceivedForNM2, 2, nmTokens, nm2);
    Assert.assertEquals(2, nmTokens.size());
    // Re-register nm2: its heartbeat response id resets to 0 and the RM
    // should drop the NM token recorded for (attempt, nm2).
    nm2 = rm.registerNode("h2:1234", 10000);
    Map<NodeId, RMNode> nodes = rm.getRMContext().getRMNodes();
    while (nodes.get(nm2.getNodeId()).getLastNodeHeartBeatResponse().getResponseId() > 0) {
      Thread.sleep(WAIT_SLEEP_MS);
    }
    // Bounded wait for the token-clearing to propagate.
    int interval = 40;
    while (nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm2.getNodeId()) && interval-- > 0) {
      LOG.info("waiting for nmToken to be cleared for : " + nm2.getNodeId());
      Thread.sleep(WAIT_SLEEP_MS);
    }
    Assert.assertTrue(
        nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
    nmTokens.remove(nm2.getNodeId().toString());
    Assert.assertEquals(1, nmTokens.size());
    // A new allocation on the re-registered nm2 re-issues its NM token.
    response = am.allocate("h2", 1000, 2, releaseContainerList);
    Assert.assertEquals(0, response.getAllocatedContainers().size());
    allocateContainersAndValidateNMTokens(am, containersReceivedForNM2, 4, nmTokens, nm2);
    Assert.assertEquals(2, nmTokens.size());
    Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm1.getNodeId()));
    Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm2.getNodeId()));
    // Rolling and activating a new master key wipes per-node token state
    // but keeps the attempt registered.
    nmTokenSecretManager.rollMasterKey();
    nmTokenSecretManager.activateNextMasterKey();
    Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm1.getNodeId()));
    Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm2.getNodeId()));
    Assert.assertTrue(
        nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
    nmTokens.clear();
    Assert.assertEquals(0, nmTokens.size());
    // After the rollover, the next allocation re-issues nm2's token.
    response = am.allocate("h2", 1000, 1, releaseContainerList);
    Assert.assertEquals(0, response.getAllocatedContainers().size());
    allocateContainersAndValidateNMTokens(am, containersReceivedForNM2, 5, nmTokens, nm2);
    Assert.assertEquals(1, nmTokens.size());
    Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(
        attempt.getAppAttemptId(), nm2.getNodeId()));
    Assert.assertTrue(
        nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
    // Finish all containers and the AM; the attempt should then be
    // deregistered from the NM token secret manager.
    am.unregisterAppAttempt();
    for (Container container : containersReceivedForNM1) {
      nm1.nodeHeartbeat(attempt.getAppAttemptId(), container.getId().getId(),
          ContainerState.COMPLETE);
    }
    for (Container container : containersReceivedForNM2) {
      nm2.nodeHeartbeat(attempt.getAppAttemptId(), container.getId().getId(),
          ContainerState.COMPLETE);
    }
    nm1.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    am.waitForState(RMAppAttemptState.FINISHED);
    Assert.assertFalse(
        nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
  } finally {
    rm.stop();
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Starts a ResourceManager with the FileSystemBasedConfigurationProvider
 * and verifies every remotely uploaded configuration is picked up at init:
 * exclude-hosts list, admin ACLs, capacity-scheduler limits, service ACLs,
 * proxy-user settings and the mock group mapping service.
 */
@Test
public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  // Recreate the exclude-hosts file from scratch and upload it.
  final File excludeHostsFile = new File(tmpDir.toString(), "excludeHosts");
  if (excludeHostsFile.exists()) {
    excludeHostsFile.delete();
  }
  if (!excludeHostsFile.createNewFile()) {
    Assert.fail("Can not create " + "excludeHosts");
  }
  // try-with-resources guarantees the writer is closed even on failure.
  try (PrintWriter fileWriter = new PrintWriter(excludeHostsFile)) {
    fileWriter.write("0.0.0.0:123");
  }
  uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
  yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, this.workingPath + "/excludeHosts");
  uploadConfiguration(yarnConf, "yarn-site.xml");
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
  uploadConfiguration(csConf, "capacity-scheduler.xml");
  String aclsString = "alice,bob users,wheel";
  Configuration newConf = new Configuration();
  newConf.set("security.applicationclient.protocol.acl", aclsString);
  uploadConfiguration(newConf, "hadoop-policy.xml");
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  conf.set("hadoop.proxyuser.test.groups", "test_groups");
  conf.set("hadoop.proxyuser.test.hosts", "test_hosts");
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MockUnixGroupsMapping.class, GroupMappingServiceProvider.class);
  uploadConfiguration(conf, "core-site.xml");
  MockUnixGroupsMapping.updateGroups();
  ResourceManager resourceManager = null;
  try {
    try {
      resourceManager = new ResourceManager();
      resourceManager.init(configuration);
      resourceManager.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }
    // Exclude-hosts list was read from the uploaded file.
    Set<String> excludeHosts = resourceManager.getRMContext().getNodesListManager()
        .getHostsReader().getExcludedHosts();
    Assert.assertEquals(1, excludeHosts.size());
    Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));
    // Fixed argument order: JUnit expects (expected, actual).
    String aclStringAfter =
        resourceManager.adminService.getAccessControlList().getAclString().trim();
    Assert.assertEquals("world:anyone:rwcda", aclStringAfter);
    CapacityScheduler cs = (CapacityScheduler) resourceManager.getRMContext().getScheduler();
    int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000, maxAppsAfter);
    // The uploaded hadoop-policy.xml ACLs are visible on every RM server.
    ServiceAuthorizationManager adminServiceServiceManager =
        resourceManager.adminService.getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(adminServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager clientRMServiceServiceManager =
        resourceManager.getRMContext().getClientRMService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(clientRMServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager appMasterService =
        resourceManager.getRMContext().getApplicationMasterService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(appMasterService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager RTService =
        resourceManager.getRMContext().getResourceTrackerService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(RTService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    // Proxy-user settings from the uploaded core-site.xml are live.
    Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
        .get("hadoop.proxyuser.test.groups").size());
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
        .get("hadoop.proxyuser.test.groups").contains("test_groups"));
    Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
        .get("hadoop.proxyuser.test.hosts").size());
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
        .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
    // Group mapping resolves through MockUnixGroupsMapping's updated groups.
    List<String> groupAfter = Groups.getUserToGroupsMappingService(configuration)
        .getGroups(UserGroupInformation.getCurrentUser().getUserName());
    Assert.assertTrue(groupAfter.contains("test_group_D")
        && groupAfter.contains("test_group_E")
        && groupAfter.contains("test_group_F") && groupAfter.size() == 3);
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * RM HA with FileSystemBasedConfigurationProvider: refreshQueues on the
 * active RM picks up an uploaded capacity-scheduler.xml; the standby RM
 * keeps the old value until it becomes active and reloads the remote
 * configuration during its transition.
 */
@Test
public void testRMHAWithFileSystemBasedConfiguration() throws IOException, YarnException {
  StateChangeRequestInfo requestInfo =
      new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  configuration.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
  // Give rm1 and rm2 distinct, non-clashing service addresses.
  int base = 100;
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, "rm1"), "0.0.0.0:" + (base + 20));
    configuration.set(HAUtil.addSuffix(confKey, "rm2"), "0.0.0.0:" + (base + 40));
    base = base * 2;
  }
  Configuration conf1 = new Configuration(configuration);
  conf1.set(YarnConfiguration.RM_HA_ID, "rm1");
  Configuration conf2 = new Configuration(configuration);
  conf2.set(YarnConfiguration.RM_HA_ID, "rm2");
  uploadDefaultConfiguration();
  MockRM rm1 = null;
  MockRM rm2 = null;
  try {
    rm1 = new MockRM(conf1);
    rm1.init(conf1);
    rm1.start();
    Assert.assertEquals(HAServiceState.STANDBY, rm1.getRMContext().getHAServiceState());
    rm2 = new MockRM(conf2);
    // BUG FIX: rm2 was initialized with conf1, which would give both RMs
    // the same HA id and service addresses; it must use its own conf2.
    rm2.init(conf2);
    rm2.start();
    Assert.assertEquals(HAServiceState.STANDBY, rm2.getRMContext().getHAServiceState());
    rm1.adminService.transitionToActive(requestInfo);
    Assert.assertEquals(HAServiceState.ACTIVE, rm1.getRMContext().getHAServiceState());
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
    uploadConfiguration(csConf, "capacity-scheduler.xml");
    // Active rm1 reloads the remote scheduler configuration on refresh.
    rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
    int maxApps = ((CapacityScheduler) rm1.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000, maxApps);
    // Standby rm2 still has the default until it transitions to active.
    int maxAppsBeforeFailOver = ((CapacityScheduler) rm2.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(10000, maxAppsBeforeFailOver);
    // Fail over: rm2 becomes active and must see the uploaded value.
    rm1.adminService.transitionToStandby(requestInfo);
    rm2.adminService.transitionToActive(requestInfo);
    Assert.assertEquals(HAServiceState.STANDBY, rm1.getRMContext().getHAServiceState());
    Assert.assertEquals(HAServiceState.ACTIVE, rm2.getRMContext().getHAServiceState());
    int maxAppsAfter = ((CapacityScheduler) rm2.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000, maxAppsAfter);
  } finally {
    if (rm1 != null) {
      rm1.stop();
    }
    if (rm2 != null) {
      rm2.stop();
    }
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * refreshAdminAcls with FileSystemBasedConfigurationProvider: after
 * uploading a yarn-site.xml with a new admin ACL, the refresh must replace
 * the ACL the RM started with.
 */
@Test
public void testAdminAclsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  String aclStringBefore = rm.adminService.getAccessControlList().getAclString().trim();
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
  uploadConfiguration(yarnConf, "yarn-site.xml");
  rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());
  String aclStringAfter = rm.adminService.getAccessControlList().getAclString().trim();
  // The ACL must have changed and must match the uploaded value.
  // Fixed argument order: JUnit expects (expected, actual).
  Assert.assertFalse(aclStringAfter.equals(aclStringBefore));
  Assert.assertEquals("world:anyone:rwcda", aclStringAfter);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * refreshUserToGroupsMappings with FileSystemBasedConfigurationProvider:
 * after uploading a core-site.xml that installs MockUnixGroupsMapping, the
 * RM resolves groups through it (A/B/C), and a refresh after
 * MockUnixGroupsMapping.updateGroups() yields the updated groups (D/E/F).
 */
@Test
public void testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  String[] defaultTestUserGroups = {"dummy_group1", "dummy_group2"};
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("dummyUser", defaultTestUserGroups);
  String user = ugi.getUserName();
  // Snapshot the groups assigned at user creation. Typed list (was raw);
  // getGroupNames() is no longer re-invoked on every loop iteration.
  List<String> groupWithInit = new ArrayList<String>(2);
  for (String group : ugi.getGroupNames()) {
    groupWithInit.add(group);
  }
  uploadDefaultConfiguration();
  Configuration conf = new Configuration();
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MockUnixGroupsMapping.class, GroupMappingServiceProvider.class);
  uploadConfiguration(conf, "core-site.xml");
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  // After startup the mock mapping is in effect: exactly groups A/B/C,
  // which differ from the groups the test user was created with.
  List<String> groupBefore = new ArrayList<String>(
      Groups.getUserToGroupsMappingService(configuration).getGroups(user));
  Assert.assertTrue(groupBefore.contains("test_group_A")
      && groupBefore.contains("test_group_B")
      && groupBefore.contains("test_group_C") && groupBefore.size() == 3);
  Assert.assertTrue(groupWithInit.size() != groupBefore.size());
  Assert.assertFalse(groupWithInit.contains("test_group_A")
      || groupWithInit.contains("test_group_B")
      || groupWithInit.contains("test_group_C"));
  // Update the mock's groups and refresh: the RM must now see D/E/F.
  MockUnixGroupsMapping.updateGroups();
  rm.adminService.refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequest.newInstance());
  List<String> groupAfter = Groups.getUserToGroupsMappingService(configuration).getGroups(user);
  Assert.assertTrue(groupAfter.contains("test_group_D")
      && groupAfter.contains("test_group_E")
      && groupAfter.contains("test_group_F") && groupAfter.size() == 3);
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * refreshNodes with FileSystemBasedConfigurationProvider: an uploaded
 * exclude-hosts file, referenced by an uploaded yarn-site.xml, becomes
 * effective after refreshNodes.
 */
@Test
public void testRefreshNodesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  // Recreate the exclude-hosts file and push it to the remote config store.
  final File excludeHostsFile = new File(tmpDir.toString(), "excludeHosts");
  if (excludeHostsFile.exists()) {
    excludeHostsFile.delete();
  }
  if (!excludeHostsFile.createNewFile()) {
    Assert.fail("Can not create " + "excludeHosts");
  }
  // try-with-resources guarantees the writer is closed even on failure.
  try (PrintWriter fileWriter = new PrintWriter(excludeHostsFile)) {
    fileWriter.write("0.0.0.0:123");
  }
  uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));
  Configuration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, this.workingPath + "/excludeHosts");
  uploadConfiguration(yarnConf, YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  rm.adminService.refreshNodes(RefreshNodesRequest.newInstance());
  Set<String> excludeHosts = rm.getNodesListManager().getHostsReader().getExcludedHosts();
  // assertEquals reports the actual size on failure, unlike assertTrue(==).
  Assert.assertEquals(1, excludeHosts.size());
  Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * refreshQueues with FileSystemBasedConfigurationProvider: an uploaded
 * capacity-scheduler.xml changes the max-applications limit after refresh.
 */
@Test
public void testAdminRefreshQueuesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler();
  int maxAppsBefore = cs.getConfiguration().getMaximumSystemApplications();
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
  uploadConfiguration(csConf, "capacity-scheduler.xml");
  rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
  int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications();
  // Fixed argument order: JUnit expects (expected, actual).
  Assert.assertEquals(5000, maxAppsAfter);
  Assert.assertTrue(maxAppsAfter != maxAppsBefore);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * refreshSuperUserGroupsConfiguration with
 * FileSystemBasedConfigurationProvider: proxy-user groups/hosts from an
 * uploaded core-site.xml are live after the refresh.
 */
@Test
public void testRefreshSuperUserGroupsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  Configuration coreConf = new Configuration(false);
  coreConf.set("hadoop.proxyuser.test.groups", "test_groups");
  coreConf.set("hadoop.proxyuser.test.hosts", "test_hosts");
  uploadConfiguration(coreConf, "core-site.xml");
  rm.adminService.refreshSuperUserGroupsConfiguration(
      RefreshSuperUserGroupsConfigurationRequest.newInstance());
  // assertEquals reports the actual size on failure, unlike assertTrue(==).
  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
      .get("hadoop.proxyuser.test.groups").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
      .get("hadoop.proxyuser.test.groups").contains("test_groups"));
  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
      .get("hadoop.proxyuser.test.hosts").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
      .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that repeated active/standby HA transitions neither leak nor
 * drop dispatcher event handlers or RM services: counts observed after
 * each transition match the counts captured right after init, and the
 * dispatcher in use while active is stopped once a transition replaces it.
 */
@Test public void testRMDispatcherForHA() throws IOException {
String errorMessageForEventHandler="Expect to get the same number of handlers";
String errorMessageForService="Expect to get the same number of services";
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
// Counting dispatcher lets the test observe handler registrations.
rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return new MyCountingDispatcher();
}
}
;
rm.init(conf);
// Baselines captured after init, before any HA transition.
int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount();
int expectedServiceCount=rm.getServices().size();
assertTrue(expectedEventHandlerCount != 0);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start(): INITIALIZING and not yet ready to become active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
rm.start();
// Bounce between standby and active several times.
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
// The dispatcher currently installed must still be running.
MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
assertTrue(!dispatcher.isStopped());
rm.adminService.transitionToActive(requestInfo);
// Handler and service counts are unchanged after becoming active.
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
// Capture the dispatcher in use while active; transitioning to standby
// should stop it while keeping the same handler/service counts.
dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
rm.adminService.transitionToStandby(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
assertTrue(dispatcher.isStopped());
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to verify the following RM HA transitions to the following states.
 * 1. Standby: Should be a no-op
 * 2. Active: Active services should start
 * 3. Active: Should be a no-op.
 * While active, submit a couple of jobs
 * 4. Standby: Active services should stop
 * 5. Active: Active services should start
 * 6. Stop the RM: All services should stop and RM should not be ready to
 * become Active
 */
@Test(timeout=30000) public void testFailoverAndTransitions() throws Exception {
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
rm=new MockRM(conf);
rm.init(conf);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start(): INITIALIZING and not ready to become active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
checkMonitorHealth();
rm.start();
checkMonitorHealth();
checkStandbyRMFunctionality();
// Standby: all cluster metrics are zero.
verifyClusterMetrics(0,0,0,0,0,0);
// Step 1: standby -> standby is a no-op.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 2: standby -> active starts active services; metrics become
// non-zero via checkActiveRMFunctionality.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 3: active -> active is a no-op for services, but another round of
// activity accumulates in the metrics.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,2,2,2,2048,2);
// Step 4: active -> standby stops active services; metrics reset to zero.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 5: standby -> active again with fresh active services.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 6: stopping the RM leaves it STOPPING, not ready for active, and
// with no active services running.
rm.stop();
assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active even after it is stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive());
assertFalse("Active RM services are started",rm.areActiveServicesRunning());
checkMonitorHealth();
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies RM_HA_ID resolution: it is derived from the address
 * configuration when unset (resolving to RM2_NODE_ID here), honoured
 * verbatim when set explicitly, and RM initialization fails fast when no
 * valid RM_HA_ID can be determined from RM_HA_IDS.
 */
@Test public void testHAIDLookup(){
  // Case 1: no explicit RM_HA_ID — init should resolve it to RM2_NODE_ID.
  Configuration conf=new YarnConfiguration(configuration);
  rm=new MockRM(conf);
  rm.init(conf);
  // JUnit convention: expected value first, so failures read correctly.
  assertEquals(RM2_NODE_ID,conf.get(YarnConfiguration.RM_HA_ID));
  // Case 2: explicitly configured RM_HA_ID must be preserved as-is.
  configuration.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID);
  conf=new YarnConfiguration(configuration);
  rm=new MockRM(conf);
  rm.init(conf);
  assertEquals(RM1_NODE_ID,conf.get(YarnConfiguration.RM_HA_ID));
  // Case 3: RM_HA_IDS is set but RM_HA_ID is unset and cannot be
  // resolved for this host — init must throw.
  configuration.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM3_NODE_ID);
  configuration.unset(YarnConfiguration.RM_HA_ID);
  conf=new YarnConfiguration(configuration);
  try {
    rm=new MockRM(conf);
    rm.init(conf);
    fail("Should get an exception here.");
  }
  catch ( Exception ex) {
    Assert.assertTrue(ex.getMessage().contains("Invalid configuration! Can not find valid RM_HA_ID."));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * An application killed before any attempt state reached the store (the
 * store's attempt operations are no-ops here) must still be recovered as
 * KILLED with zero attempts after an RM restart.
 */
@Test(timeout=60000) public void testRMRestartKilledAppWithNoAttempts() throws Exception {
  // State store that silently drops all attempt state.
  MemoryRMStateStore memStore=new MemoryRMStateStore(){
    @Override public synchronized void storeApplicationAttemptStateInternal( ApplicationAttemptId attemptId, ApplicationAttemptStateData attemptStateData) throws Exception {
      // Intentional no-op: attempt state is never persisted.
    }
    @Override public synchronized void updateApplicationAttemptStateInternal( ApplicationAttemptId attemptId, ApplicationAttemptStateData attemptStateData) throws Exception {
      // Intentional no-op: attempt state is never updated.
    }
  };
  memStore.init(conf);
  // First RM: submit an app (maxAppAttempts -1, unmanaged-style submit)
  // and kill it before any attempt is launched.
  MockRM rm1=new MockRM(conf,memStore);
  rm1.start();
  RMApp app0=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false);
  rm1.killApp(app0.getApplicationId());
  rm1.waitForState(app0.getApplicationId(),RMAppState.KILLED);
  // Restarted RM: the recovered app must be KILLED and carry no attempts.
  MockRM rm2=new MockRM(conf,memStore);
  rm2.start();
  RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
  rm2.waitForState(loadedApp0.getApplicationId(),RMAppState.KILLED);
  // assertEquals reports actual size on failure, unlike assertTrue(==0).
  Assert.assertEquals(0,loadedApp0.getAppAttempts().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Full RM restart/recovery walk-through. Before restart:
//  - app0: runs to completion (must recover as FINISHED)
//  - app1: running, with one extra container allocated
//  - app2: submitted, no attempt scheduled yet
//  - appUnmanaged: unmanaged AM, LAUNCHED/ACCEPTED
// After restart, stale AMs get AM_SHUTDOWN, stale NMs get RESYNC,
// NMs re-register, new attempts are launched and both managed apps are
// driven to completion; all four apps stay in the persisted state.
@SuppressWarnings("rawtypes") @Test(timeout=180000) public void testRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// Keep direct handles on the store's internal maps so persisted state
// can be asserted on without going through the RM.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
MockNM nm2=new MockNM("127.0.0.2:5678",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm2.registerNode();
// app0: launch its AM and finish it before the restart.
RMApp app0=rm1.submitApp(200);
RMAppAttempt attempt0=app0.getCurrentAppAttempt();
Assert.assertEquals(1,rmAppState.size());
nm1.nodeHeartbeat(true);
MockAM am0=rm1.sendAMLaunched(attempt0.getAppAttemptId());
am0.registerAppAttempt();
finishApplicationMaster(app0,rm1,nm1,am0);
// app1: submission must be persisted (with zero attempts) immediately.
RMApp app1=rm1.submitApp(200);
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
// Drive app1's first attempt to ALLOCATED; its master container id must
// be recorded in the store.
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
Assert.assertEquals(1,appState.getAttemptCount());
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
// Register app1's AM and allocate one extra container, heartbeating
// until the scheduler hands it out.
MockAM am1=rm1.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.allocate("127.0.0.1",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
List conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
// app2: submitted only — no attempt should be persisted yet.
RMApp app2=rm1.submitApp(200);
appState=rmAppState.get(app2.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app2.getApplicationSubmissionContext().getApplicationId());
// appUnmanaged: unmanaged AM — its single attempt reaches LAUNCHED and
// the app ACCEPTED, all of which must be in the store.
RMApp appUnmanaged=rm1.submitApp(200,"someApp","someUser",null,true,null,conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null);
ApplicationAttemptId unmanagedAttemptId=appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
ApplicationId unmanagedAppId=appUnmanaged.getApplicationId();
appState=rmAppState.get(unmanagedAppId);
Assert.assertNotNull(appState);
rm1.waitForState(unmanagedAttemptId,RMAppAttemptState.LAUNCHED);
rm1.waitForState(unmanagedAppId,RMAppState.ACCEPTED);
Assert.assertEquals(1,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),appUnmanaged.getApplicationSubmissionContext().getApplicationId());
// "Restart": start a second RM over the same store and point both NMs
// at it. All four apps must be recovered.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
Assert.assertEquals(4,rm2.getRMContext().getRMApps().size());
rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FINISHED);
// Recovered app1/app2 keep their submission contexts and single attempt.
RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
Assert.assertNotNull(loadedApp1);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(app1.getApplicationSubmissionContext().getApplicationId(),loadedApp1.getApplicationSubmissionContext().getApplicationId());
RMApp loadedApp2=rm2.getRMContext().getRMApps().get(app2.getApplicationId());
Assert.assertNotNull(loadedApp2);
Assert.assertEquals(app2.getApplicationSubmissionContext().getApplicationId(),loadedApp2.getApplicationSubmissionContext().getApplicationId());
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
rm2.waitForState(loadedApp2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(1,loadedApp2.getAppAttempts().size());
// The old AM talking to the new RM must be told to shut down; stale NMs
// must be told to resync.
am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext());
AllocateResponse allocResponse=am1.allocate(new ArrayList(),new ArrayList());
Assert.assertEquals(AMCommand.AM_SHUTDOWN,allocResponse.getAMCommand());
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
// Re-create the NMs against rm2; nm1 reports app1's old AM container as
// COMPLETE so a new attempt gets scheduled.
nm1=new MockNM("127.0.0.1:1234",15120,rm2.getResourceTrackerService());
nm2=new MockNM("127.0.0.2:5678",15120,rm2.getResourceTrackerService());
NMContainerStatus status=TestRMRestart.createNMContainerStatus(loadedApp1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status),null);
nm2.registerNode();
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
// Poll (max ~8s) for app1's second attempt to appear.
int timeoutSecs=0;
while (loadedApp1.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
;
Thread.sleep(200);
}
// Freshly registered NMs must no longer be asked to resync.
hbResponse=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
// New attempt for app1: wait for ALLOCATED and check its master
// container was persisted; remember which NM hosts the AM.
attempt1=loadedApp1.getCurrentAppAttempt();
attemptId1=attempt1.getAppAttemptId();
rm2.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp1.getApplicationId());
attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
MockNM am1Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am1Node=nm2;
}
// Same for app2's first (recovered) attempt.
RMAppAttempt attempt2=loadedApp2.getCurrentAppAttempt();
ApplicationAttemptId attemptId2=attempt2.getAppAttemptId();
rm2.waitForState(attemptId2,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp2.getApplicationId());
attemptState=appState.getAttempt(attemptId2);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId2,1),attemptState.getMasterContainer().getId());
MockNM am2Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am2Node=nm2;
}
// Launch and register both AMs on the new RM and let am1 obtain a
// container again, heartbeating until allocation happens.
am1=rm2.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
MockAM am2=rm2.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
am1.allocate("127.0.0.1",1000,3,new ArrayList());
am2.allocate("127.0.0.2",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
// Finish both apps; the store must still hold all four applications.
finishApplicationMaster(loadedApp1,rm2,am1Node,am1);
finishApplicationMaster(loadedApp2,rm2,am2Node,am2);
rm2.stop();
rm1.stop();
Assert.assertEquals(4,rmAppState.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// With max attempts = 1, an app whose only attempt fails must be stored
// as FAILED and recovered as FAILED (attempt included) after RM restart,
// with a client-visible report and failure diagnostics.
@Test(timeout=60000) public void testRMRestartFailedApp() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// Launch the AM, then report its container COMPLETE so the single
// attempt — and hence the app — fails.
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am0.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app0.getApplicationId(),RMAppState.FAILED);
// The FAILED app and attempt states must have been persisted.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
Assert.assertEquals(RMAppState.FAILED,appState.getState());
Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
// Restart: the app must recover as FAILED with its one attempt intact.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(),RMAppState.FAILED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED);
Assert.assertEquals(1,loadedApp0.getAppAttempts().size());
verifyAppReportAfterRMRestart(app0,rm2);
Assert.assertTrue(app0.getDiagnostics().toString().contains("Failing the application."));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The state-store dispatcher must be drained on RM stop: store events are
 * blocked while the RM runs (so nothing is persisted), and stopping the RM
 * unblocks and flushes every pending event, leaving all submitted apps in
 * the store.
 */
@Test(timeout=60000) public void testRMStateStoreDispatcherDrainedOnRMStop() throws Exception {
  // Store that busy-waits on every event until serviceStop() releases it.
  MemoryRMStateStore memStore=new MemoryRMStateStore(){
    volatile boolean wait=true;
    @Override public void serviceStop() throws Exception {
      wait=false;
      super.serviceStop();
    }
    @Override protected void handleStoreEvent( RMStateStoreEvent event){
      // Spin until stop; 'wait' is volatile so the flip is visible here.
      while (wait) ;
      super.handleStoreEvent(event);
    }
  };
  memStore.init(conf);
  final MockRM rm1=new MockRM(conf,memStore);
  rm1.start();
  // Submit several apps; each reaches NEW_SAVING but cannot be persisted
  // while the store's event handler is blocked.
  final ArrayList appList=new ArrayList();
  final int NUM_APPS=5;
  for (int i=0; i < NUM_APPS; i++) {
    RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false);
    appList.add(app);
    rm1.waitForState(app.getApplicationId(),RMAppState.NEW_SAVING);
  }
  Map rmAppState=memStore.getState().getApplicationState();
  // assertEquals reports the stray count on failure, unlike assertTrue.
  Assert.assertEquals(0,rmAppState.size());
  // Stopping the RM releases the handler and must drain every queued
  // store event before shutdown completes.
  rm1.stop();
  for ( RMApp app : appList) {
    ApplicationState appState=rmAppState.get(app.getApplicationId());
    Assert.assertNotNull(appState);
    Assert.assertEquals(0,appState.getAttemptCount());
    Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app.getApplicationSubmissionContext().getApplicationId());
  }
  Assert.assertEquals(NUM_APPS,rmAppState.size());
}
TestInitializer BooleanVerifier HybridVerifier
/**
 * Common fixture: debug logging, a YarnConfiguration with recovery enabled
 * against the in-memory state store, and the RM RPC address used by the
 * delegation-token tests. Also sanity-checks that the default AM attempt
 * limit permits retries, which the restart tests rely on.
 */
@Before public void setup() throws UnknownHostException {
  // Verbose logging makes state-transition failures diagnosable.
  LogManager.getRootLogger().setLevel(Level.DEBUG);
  conf=new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  // Same Configuration object UGI already holds, so these settings are
  // visible there too.
  conf.set(YarnConfiguration.RECOVERY_ENABLED,"true");
  conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
  rmAddr=new InetSocketAddress("localhost",8032);
  // Tests below assume more than one AM attempt is allowed by default.
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After an RM restart, getApplications must return recovered apps with
 * their terminal states intact (FINISHED / FAILED / KILLED), support
 * filtering by application type, and log one summary per recovered app.
 */
@Test(timeout=60000) public void testRMRestartGetApplicationList() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
  MemoryRMStateStore memStore=new MemoryRMStateStore();
  memStore.init(conf);
  MockRM rm1=new MockRM(conf,memStore);
  rm1.start();
  MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
  nm1.registerNode();
  // app0: runs to completion -> FINISHED.
  RMApp app0=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
  MockAM am0=launchAM(app0,rm1,nm1);
  finishApplicationMaster(app0,rm1,nm1,am0);
  // app1: its only attempt's AM container completes -> FAILED.
  RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
  MockAM am1=launchAM(app1,rm1,nm1);
  nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
  am1.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
  // app2: killed by the client -> KILLED.
  RMApp app2=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
  MockAM am2=launchAM(app2,rm1,nm1);
  rm1.killApp(app2.getApplicationId());
  rm1.waitForState(app2.getApplicationId(),RMAppState.KILLED);
  rm1.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.KILLED);
  // Restart with a spied RMAppManager so summary logging can be verified.
  MockRM rm2=new MockRM(conf,memStore){
    @Override protected RMAppManager createRMAppManager(){
      return spy(super.createRMAppManager());
    }
  };
  rm2.start();
  // Query by terminal state and check each app kept its own state.
  GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED));
  GetApplicationsResponse response1=rm2.getClientRMService().getApplications(request1);
  List appList1=response1.getApplicationList();
  boolean forApp0=false, forApp1=false, forApp2=false;
  for ( ApplicationReport report : appList1) {
    if (report.getApplicationId().equals(app0.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FINISHED,report.getYarnApplicationState());
      forApp0=true;
    }
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FAILED,report.getYarnApplicationState());
      forApp1=true;
    }
    if (report.getApplicationId().equals(app2.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.KILLED,report.getYarnApplicationState());
      forApp2=true;
    }
  }
  Assert.assertTrue(forApp0 && forApp1 && forApp2);
  // Filtering by application type must return all three recovered apps.
  Set appTypes=new HashSet();
  appTypes.add("myType");
  GetApplicationsRequest request2=GetApplicationsRequest.newInstance(appTypes);
  GetApplicationsResponse response2=rm2.getClientRMService().getApplications(request2);
  List appList2=response2.getApplicationList();
  // assertEquals over assertTrue(3 == size()): failure shows actual size.
  Assert.assertEquals(3,appList2.size());
  // One application summary must have been logged per recovered app.
  verify(rm2.getRMAppManager(),times(3)).logApplicationSummary(isA(ApplicationId.class));
  rm1.stop();
  rm2.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * RM startup must fail when the state store reports an incompatible
 * version, and the RM must end up fully STOPPED afterwards.
 */
@Test(timeout=10000) public void testRMShutdown() throws Exception {
  // Store whose version check always rejects.
  MemoryRMStateStore memStore=new MemoryRMStateStore(){
    @Override public synchronized void checkVersion() throws Exception {
      throw new Exception("Invalid version.");
    }
  };
  memStore.init(conf);
  MockRM rm1=null;
  try {
    rm1=new MockRM(conf,memStore);
    rm1.start();
    Assert.fail();
  }
  catch ( Exception e) {
    Assert.assertTrue(e.getMessage().contains("Invalid version."));
  }
  // Guard against an NPE masking the real failure: if construction itself
  // threw, rm1 would still be null when we check its state below.
  Assert.assertNotNull("MockRM should have been constructed",rm1);
  Assert.assertEquals(STATE.STOPPED,rm1.getServiceState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A client killing an app must be able to poll killApp until the RM
// reports the kill complete; afterwards the store should show exactly one
// attempt update and two app updates (counters tracked by
// TestMemoryRMStateStore — confirm against that class).
@Test(timeout=60000) public void testClientRetryOnKillingApplication() throws Exception {
MemoryRMStateStore memStore=new TestMemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am1=launchAM(app1,rm1,nm1);
KillApplicationResponse response;
int count=0;
// Retry killApp until the RM acknowledges completion; the kill is
// asynchronous, so the first responses may report "not yet complete".
while (true) {
response=rm1.killApp(app1.getApplicationId());
if (response.getIsKillCompleted()) {
break;
}
Thread.sleep(100);
count++;
}
// At least one retry must have been needed (kill is not instantaneous).
Assert.assertTrue(count >= 1);
rm1.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.KILLED);
rm1.waitForState(app1.getApplicationId(),RMAppState.KILLED);
Assert.assertEquals(1,((TestMemoryRMStateStore)memStore).updateAttempt);
Assert.assertEquals(2,((TestMemoryRMStateStore)memStore).updateApp);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// RM delegation tokens and master keys must survive an RM restart:
// tokens/keys issued by rm1 are persisted, a token cancelled before
// restart stays gone, and rm2 can renew and cancel the surviving token
// with the store kept in sync throughout.
@Test(timeout=60000) public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
// Kerberos auth is required for the delegation-token RPC path.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(YarnConfiguration.RM_ADDRESS,"localhost:8032");
UserGroupInformation.setConfiguration(conf);
// Direct handles on the store's app/token/master-key state for asserts.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
Map rmDTState=rmState.getRMDTSecretManagerState().getTokenState();
Set rmDTMasterKeyState=rmState.getRMDTSecretManagerState().getMasterKeyState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
// Obtain a delegation token (token1) from rm1 as a kerberos-auth'd user.
Credentials ts=new Credentials();
GetDelegationTokenRequest request1=GetDelegationTokenRequest.newInstance("renewer1");
UserGroupInformation.getCurrentUser().setAuthenticationMethod(AuthMethod.KERBEROS);
GetDelegationTokenResponse response1=rm1.getClientRMService().getDelegationToken(request1);
org.apache.hadoop.yarn.api.records.Token delegationToken1=response1.getRMDelegationToken();
Token token1=ConverterUtils.convertFromYarn(delegationToken1,rmAddr);
RMDelegationTokenIdentifier dtId1=token1.decodeIdentifier();
HashSet tokenIdentSet=new HashSet();
ts.addToken(token1.getService(),token1);
tokenIdentSet.add(dtId1);
// Submit an app carrying the token; its state must be persisted.
RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts);
ApplicationState appState=rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
// The secret manager's master keys, tokens and sequence number must all
// mirror what is in the store.
Set allKeysRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys();
Assert.assertEquals(allKeysRM1,rmDTMasterKeyState);
Map allTokensRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertEquals(tokenIdentSet,allTokensRM1.keySet());
Assert.assertEquals(allTokensRM1,rmDTState);
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rmState.getRMDTSecretManagerState().getDTSequenceNumber());
// Issue a second token and cancel it before restart: it must be removed
// from the persisted token state.
GetDelegationTokenRequest request2=GetDelegationTokenRequest.newInstance("renewer2");
GetDelegationTokenResponse response2=rm1.getClientRMService().getDelegationToken(request2);
org.apache.hadoop.yarn.api.records.Token delegationToken2=response2.getRMDelegationToken();
Token token2=ConverterUtils.convertFromYarn(delegationToken2,rmAddr);
RMDelegationTokenIdentifier dtId2=token2.decodeIdentifier();
try {
rm1.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token2,UserGroupInformation.getCurrentUser().getUserName());
}
catch ( Exception e) {
Assert.fail();
}
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),dtId2.getSequenceNumber());
Assert.assertFalse(rmDTState.containsKey(dtId2));
// Restart: rm2 must recover the same tokens, at least the same master
// keys, and the same sequence number.
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
Map allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertEquals(allTokensRM2.keySet(),allTokensRM1.keySet());
Assert.assertTrue(rm2.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys().containsAll(allKeysRM1));
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rm2.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber());
// Renewal on rm2 must advance the renew date both in memory and in the
// store (the 1 ms sleep guarantees a strictly later timestamp).
Long renewDateBeforeRenew=allTokensRM2.get(dtId1);
try {
Thread.sleep(1);
rm2.getRMContext().getRMDelegationTokenSecretManager().renewToken(token1,"renewer1");
}
catch ( Exception e) {
Assert.fail();
}
allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Long renewDateAfterRenew=allTokensRM2.get(dtId1);
Assert.assertTrue(renewDateAfterRenew > renewDateBeforeRenew);
Assert.assertTrue(rmDTState.containsValue(renewDateAfterRenew));
Assert.assertFalse(rmDTState.containsValue(renewDateBeforeRenew));
// Cancellation on rm2 must remove the token from memory and the store.
try {
rm2.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token1,UserGroupInformation.getCurrentUser().getUserName());
}
catch ( Exception e) {
Assert.fail();
}
allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertFalse(allTokensRM2.containsKey(dtId1));
Assert.assertFalse(rmDTState.containsKey(dtId1));
rm1.stop();
rm2.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An NM heartbeating with a wildly out-of-sync response id must be told to
 * RESYNC (treated as a reboot) with an explanatory diagnostic, while an
 * in-sync NM keeps getting NORMAL; the rebooted-NM metric increments once.
 */
@Test public void testReboot() throws Exception {
  Configuration conf=new Configuration();
  rm=new MockRM(conf);
  rm.start();
  MockNM nm1=rm.registerNode("host1:1234",5120);
  MockNM nm2=rm.registerNode("host2:1234",2048);
  int initialMetricCount=ClusterMetrics.getMetrics().getNumRebootedNMs();
  // Healthy, in-sync heartbeat: business as usual.
  NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
  // assertEquals over assertTrue(equals): failures show both values.
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  // Heartbeat with response id -100 (far behind the RM's 0): resync.
  nodeHeartbeat=nm2.nodeHeartbeat(new HashMap>(),true,-100);
  Assert.assertEquals(NodeAction.RESYNC,nodeHeartbeat.getNodeAction());
  Assert.assertEquals("Too far behind rm response id:0 nm response id:-100",nodeHeartbeat.getDiagnosticsMessage());
  checkRebootedNMCount(rm,++initialMetricCount);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Decommissioning using a pre-configured include hosts file: nodes present
 * in the include list heartbeat NORMAL; a node removed from the list is
 * told to SHUTDOWN on its next heartbeat and the decommissioned-NM metric
 * increments exactly once.
 */
@Test public void testDecommissionWithIncludeHosts() throws Exception {
  writeToHostsFile("localhost","host1","host2");
  Configuration conf=new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
  rm=new MockRM(conf);
  rm.start();
  MockNM nm1=rm.registerNode("host1:1234",5120);
  MockNM nm2=rm.registerNode("host2:5678",10240);
  MockNM nm3=rm.registerNode("localhost:4433",1024);
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  assert (metrics != null);
  int metricCount=metrics.getNumDecommisionedNMs();
  // All three hosts are included: every heartbeat is NORMAL.
  // assertEquals over assertTrue(equals): failures show both values.
  NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  nodeHeartbeat=nm2.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  nodeHeartbeat=nm3.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  // Rewrite the include file without host2 (localhost kept via its
  // resolved IP) and refresh the node list.
  String ip=NetUtils.normalizeHostName("localhost");
  writeToHostsFile("host1",ip);
  rm.getNodesListManager().refreshNodes(conf);
  // host1 stays included: still NORMAL, metric unchanged.
  nodeHeartbeat=nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  Assert.assertEquals(0,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
  // host2 was dropped from the include list: it must be shut down.
  nodeHeartbeat=nm2.nodeHeartbeat(true);
  Assert.assertEquals("Node is not decommisioned.",NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction());
  checkDecommissionedNMCount(rm,++metricCount);
  // localhost remains included (by IP): NORMAL and metric stable.
  nodeHeartbeat=nm3.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  Assert.assertEquals(metricCount,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A NodeManager reporting a version below the configured minimum
 * ("EqualToRM" here) must be told to SHUTDOWN at registration time, with a
 * diagnostic message explaining the version mismatch.
 */
@Test public void testNodeRegistrationVersionLessThanRM() throws Exception {
  writeToHostsFile("host2");
  Configuration conf=new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
  conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,"EqualToRM");
  rm=new MockRM(conf);
  rm.start();
  // Build a registration request for an NM older than the RM.
  String nmVersion="1.9.9";
  RegisterNodeManagerRequest request=Records.newRecord(RegisterNodeManagerRequest.class);
  request.setNodeId(NodeId.newInstance("host2",1234));
  request.setResource(BuilderUtils.newResource(1024,1));
  request.setHttpPort(1234);
  request.setNMVersion(nmVersion);
  RegisterNodeManagerResponse response=rm.getResourceTrackerService().registerNodeManager(request);
  // Registration must be rejected outright with a version diagnostic.
  Assert.assertEquals(NodeAction.SHUTDOWN,response.getNodeAction());
  Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager " + "Version " + nmVersion + ", is less than the minimum version'",response.getDiagnosticsMessage().contains("Disallowed NodeManager Version " + nmVersion + ", is less than the minimum version "));
}
InternalCallVerifier BooleanVerifier
/**
 * Decommissioning using a pre-configured exclude hosts file: after host2
 * and localhost are added to the exclude list and the node list is
 * refreshed, their heartbeats return SHUTDOWN and the decommissioned-NM
 * metric rises by two, while the non-excluded host1 stays NORMAL.
 */
@Test public void testDecommissionWithExcludeHosts() throws Exception {
  Configuration conf=new Configuration();
  conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,hostFile.getAbsolutePath());
  // Start with an empty exclude list.
  writeToHostsFile("");
  rm=new MockRM(conf);
  rm.start();
  MockNM nm1=rm.registerNode("host1:1234",5120);
  MockNM nm2=rm.registerNode("host2:5678",10240);
  MockNM nm3=rm.registerNode("localhost:4433",1024);
  int metricCount=ClusterMetrics.getMetrics().getNumDecommisionedNMs();
  // Nothing excluded yet: heartbeats are NORMAL.
  // assertEquals over assertTrue(equals): failures show both values.
  NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  nodeHeartbeat=nm2.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  // Exclude host2 and localhost (via its resolved IP), then refresh:
  // both must be counted as decommissioned immediately.
  String ip=NetUtils.normalizeHostName("localhost");
  writeToHostsFile("host2",ip);
  rm.getNodesListManager().refreshNodes(conf);
  checkDecommissionedNMCount(rm,metricCount + 2);
  // host1 is not excluded: still NORMAL.
  nodeHeartbeat=nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
  // The excluded nodes must be told to shut down on heartbeat.
  nodeHeartbeat=nm2.nodeHeartbeat(true);
  Assert.assertEquals("The decommisioned metrics are not updated",NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction());
  nodeHeartbeat=nm3.nodeHeartbeat(true);
  Assert.assertEquals("The decommisioned metrics are not updated",NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A node that re-registers (reconnects) must keep the active-NM count and
// unhealthy-NM count stable, preserve/restore available memory in the
// scheduler metrics, and have a capability change on re-registration
// reflected once it heartbeats healthy.
@Test public void testReconnectNode() throws Exception {
final DrainDispatcher dispatcher=new DrainDispatcher();
// MockRM with a synchronous scheduler-event dispatcher (events handled
// inline) and a drainable main dispatcher so the test can await() until
// all queued events are processed.
rm=new MockRM(){
@Override protected EventHandler createSchedulerEventDispatcher(){
return new SchedulerEventDispatcher(this.scheduler){
@Override public void handle( SchedulerEvent event){
scheduler.handle(event);
}
}
;
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",5120);
// nm1 healthy, nm2 unhealthy: exactly one unhealthy NM expected.
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(false);
dispatcher.await();
checkUnealthyNMCount(rm,nm2,true,1);
final int expectedNMs=ClusterMetrics.getMetrics().getNumActiveNMs();
QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics();
// Only healthy nm1's 5120 MB counts as available.
Assert.assertEquals(5120,metrics.getAvailableMB());
// Reconnect the healthy node: active/unhealthy counts must not change.
nm1=rm.registerNode("host1:1234",5120);
NodeHeartbeatResponse response=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// Reconnect the unhealthy node, still reporting unhealthy: unchanged.
nm2=rm.registerNode("host2:5678",5120);
response=nm2.nodeHeartbeat(false);
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// Reconnect nm2 and heartbeat healthy: its memory rejoins the pool.
nm2=rm.registerNode("host2:5678",5120);
dispatcher.await();
response=nm2.nodeHeartbeat(true);
response=nm2.nodeHeartbeat(true);
dispatcher.await();
Assert.assertEquals(5120 + 5120,metrics.getAvailableMB());
// Reconnect host2 with doubled capability (10240): after a healthy
// heartbeat the new capability must be reflected in available memory.
nm1=rm.registerNode("host2:5678",10240);
dispatcher.await();
response=nm1.nodeHeartbeat(true);
dispatcher.await();
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
Assert.assertEquals(5120 + 10240,metrics.getAvailableMB());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * If an application's state was saved before failover, re-submitting with
 * the same application id on the new active RM must return the recovered
 * application rather than creating a new one.
 */
@Test(timeout=5000) public void testHandleRMHADuringSubmitApplicationCallWithSavedApplicationState() throws Exception {
  startRMs();
  RMApp app0=rm1.submitApp(200);
  explicitFailover();
  // The new active RM must have recovered app0 from the store.
  Assert.assertTrue(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
  // Re-submit with app0's id: the RM should hand back the same app.
  RMApp app1=rm2.submitApp(200,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null,null,false,false,true,app0.getApplicationId());
  // JUnit convention: expected value (the original id) comes first.
  Assert.assertEquals(app0.getApplicationId(),app1.getApplicationId());
}
InternalCallVerifier BooleanVerifier
// If the application state was NOT saved before failover (customized
// RMAppManager), the new active RM must not know the app — and a
// re-submission with the same id must succeed and register it.
@Test(timeout=5000) public void testHandleRMHADuringSubmitApplicationCallWithoutSavedApplicationState() throws Exception {
startRMsWithCustomizedRMAppManager();
RMApp app0=rm1.submitApp(200,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null,null,false,false);
explicitFailover();
// Nothing was persisted, so rm2 has no record of app0.
Assert.assertFalse(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
// Re-submitting with the same application id must create the app anew.
RMApp app1=rm2.submitApp(200,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null,null,false,false,true,app0.getApplicationId());
verifySubmitApp(rm2,app1,app0.getApplicationId());
Assert.assertTrue(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test multiple calls of getApplicationReport, to make sure it is
 * idempotent: repeated calls on the same RM return identical id/state,
 * both before and after an explicit failover.
 */
@Test public void testGetApplicationReportIdempotent() throws Exception {
startRMs();
RMApp app=rm1.submitApp(200);
// A just-submitted app is legitimately in either pre-running state.
ApplicationReport appReport1=rm1.getApplicationReport(app.getApplicationId());
Assert.assertTrue(appReport1.getYarnApplicationState() == YarnApplicationState.ACCEPTED || appReport1.getYarnApplicationState() == YarnApplicationState.SUBMITTED);
// Second call on rm1 must agree with the first.
ApplicationReport appReport2=rm1.getApplicationReport(app.getApplicationId());
Assert.assertEquals(appReport1.getApplicationId(),appReport2.getApplicationId());
Assert.assertEquals(appReport1.getYarnApplicationState(),appReport2.getYarnApplicationState());
explicitFailover();
// After failover, the recovered report must match the pre-failover one.
ApplicationReport appReport3=rm2.getApplicationReport(app.getApplicationId());
Assert.assertEquals(appReport1.getApplicationId(),appReport3.getApplicationId());
Assert.assertEquals(appReport1.getYarnApplicationState(),appReport3.getYarnApplicationState());
// And repeated calls on rm2 must also be idempotent.
ApplicationReport appReport4=rm2.getApplicationReport(app.getApplicationId());
Assert.assertEquals(appReport3.getApplicationId(),appReport4.getApplicationId());
Assert.assertEquals(appReport3.getYarnApplicationState(),appReport4.getYarnApplicationState());
}
InternalCallVerifier BooleanVerifier
/**
 * After failover, the standby-turned-active RM must report a previously
 * submitted (and persisted) application in an early lifecycle state.
 */
@Test public void testHandleRMHAafterSubmitApplicationCallWithSavedApplicationState() throws Exception {
  startRMs();
  RMApp app0=rm1.submitApp(200);
  explicitFailover();
  // The recovered report may legitimately be in either pre-running state.
  ApplicationReport report=rm2.getApplicationReport(app0.getApplicationId());
  YarnApplicationState state=report.getYarnApplicationState();
  Assert.assertTrue(state == YarnApplicationState.ACCEPTED || state == YarnApplicationState.SUBMITTED);
}
InternalCallVerifier BooleanVerifier
// Verifies that after an RM restart the recovered AM container is still
// flagged as an AM container by the new scheduler instance.
@Test(timeout=30000) public void testAMContainerStatusWithRMRestart() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1_1=rm1.submitApp(1024);
MockAM am1_1=MockRM.launchAndRegisterAM(app1_1,rm1,nm1);
RMAppAttempt attempt0=app1_1.getCurrentAppAttempt();
AbstractYarnScheduler scheduler=((AbstractYarnScheduler)rm1.getResourceScheduler());
// Before restart: the attempt's master container must carry the AM flag.
Assert.assertTrue(scheduler.getRMContainer(attempt0.getMasterContainer().getId()).isAMContainer());
// Restart the RM from the same state store and re-register the NM with the
// statuses of the containers it still hosts so they can be recovered.
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
List am1_1Containers=createNMContainerStatusForApp(am1_1);
nm1.registerNode(am1_1Containers,null);
waitForNumContainersToRecover(2,rm2,am1_1.getApplicationAttemptId());
// After recovery the AM container must retain its AM flag in rm2's scheduler.
scheduler=((AbstractYarnScheduler)rm2.getResourceScheduler());
Assert.assertTrue(scheduler.getRMContainer(attempt0.getMasterContainer().getId()).isAMContainer());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that after an RM restart the scheduler rebuilds its full state —
// node resources, live containers, queue/app accounting, headroom and the
// container-id sequence — from the containers the NM reports on re-registration.
@Test(timeout=20000) public void testSchedulerRecovery() throws Exception {
conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS,true);
conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,DominantResourceCalculator.class.getName());
int containerMemory=1024;
Resource containerResource=Resource.newInstance(containerMemory,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
rm1.clearQueueMetrics(app1);
// Restart the RM from the same store; the NM re-registers reporting two
// RUNNING containers (one of them the AM) and one COMPLETE container.
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
RMAppAttempt loadedAttempt1=recoveredApp1.getCurrentAppAttempt();
NMContainerStatus amContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),1,ContainerState.RUNNING);
NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(amContainer,runningContainer,completedContainer),null);
waitForNumContainersToRecover(2,rm2,am1.getApplicationAttemptId());
// Node-level view: only the two still-running containers count as launched.
Set launchedContainers=((RMNodeImpl)rm2.getRMContext().getRMNodes().get(nm1.getNodeId())).getLaunchedContainers();
assertTrue(launchedContainers.contains(amContainer.getContainerId()));
assertTrue(launchedContainers.contains(runningContainer.getContainerId()));
rm2.waitForState(nm1,amContainer.getContainerId(),RMContainerState.RUNNING);
rm2.waitForState(nm1,runningContainer.getContainerId(),RMContainerState.RUNNING);
rm2.waitForContainerToComplete(loadedAttempt1,completedContainer);
// Scheduler-node view: two valid containers, the completed one rejected, and
// used/available resources reflecting exactly two recovered containers.
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
SchedulerNode schedulerNode1=scheduler.getSchedulerNode(nm1.getNodeId());
Resource usedResources=Resources.multiply(containerResource,2);
Resource nmResource=Resource.newInstance(nm1.getMemory(),nm1.getvCores());
assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId()));
assertTrue(schedulerNode1.isValidContainer(runningContainer.getContainerId()));
assertFalse(schedulerNode1.isValidContainer(completedContainer.getContainerId()));
assertEquals(2,schedulerNode1.getNumContainers());
assertEquals(Resources.subtract(nmResource,usedResources),schedulerNode1.getAvailableResource());
assertEquals(usedResources,schedulerNode1.getUsedResource());
Resource availableResources=Resources.subtract(nmResource,usedResources);
// Queue-level checks branch on which scheduler implementation is under test.
Map schedulerApps=((AbstractYarnScheduler)rm2.getResourceScheduler()).getSchedulerApplications();
SchedulerApplication schedulerApp=schedulerApps.get(recoveredApp1.getApplicationId());
if (schedulerClass.equals(CapacityScheduler.class)) {
checkCSQueue(rm2,schedulerApp,nmResource,nmResource,usedResources,2);
}
else if (schedulerClass.equals(FifoScheduler.class)) {
checkFifoQueue(schedulerApp,usedResources,availableResources);
}
// Attempt-level checks: live containers, current consumption, headroom
// (skipped for FairScheduler, which computes headroom differently) and the
// container-id sequence. The expected id (1 << 22) + 1 presumably encodes
// the post-restart epoch in the high bits — TODO confirm the bit layout
// against the ContainerId/epoch implementation.
SchedulerApplicationAttempt schedulerAttempt=schedulerApp.getCurrentAppAttempt();
assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(amContainer.getContainerId())));
assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(runningContainer.getContainerId())));
assertEquals(schedulerAttempt.getCurrentConsumption(),usedResources);
if (scheduler.getClass() != FairScheduler.class) {
assertEquals(availableResources,schedulerAttempt.getHeadroom());
}
assertEquals((1 << 22) + 1,schedulerAttempt.getNewContainerId());
}
BooleanVerifier
// Verifies that a container released by the AM while the RM was restarting is
// reported back as completed and is NOT resurrected when the NM re-registers
// with the new RM.
@Test(timeout=30000) public void testReleasedContainerNotRecovered() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
// Fix: start the RM before creating/registering the NM. The original code
// called nm1.registerNode() against rm1's ResourceTrackerService before
// rm1.start(), i.e. against services that had not been started yet.
rm1.start();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(1024);
final MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
// Shorten NM expiry so the restarted RM's bookkeeping settles quickly.
conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,8000);
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext());
am1.registerAppAttempt(true);
// Release container 2 on the new RM before the NM has re-registered it.
final ContainerId runningContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
am1.allocate(null,Arrays.asList(runningContainer));
List containerStatuses=createNMContainerStatusForApp(am1);
nm1.registerNode(containerStatuses,null);
waitForNumContainersToRecover(1,rm2,am1.getApplicationAttemptId());
final AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
// The released container must come back to the AM as a completed container.
AllocateResponse response=am1.allocate(null,null);
boolean receivedCompletedContainer=false;
for ( ContainerStatus status : response.getCompletedContainersStatuses()) {
if (status.getContainerId().equals(runningContainer)) {
receivedCompletedContainer=true;
}
}
assertTrue(receivedCompletedContainer);
// Eventually the scheduler must have no pending release and no trace of the
// released container at all.
GenericTestUtils.waitFor(new Supplier(){
public Boolean get(){
return scheduler.getApplicationAttempt(am1.getApplicationAttemptId()).getPendingRelease().isEmpty() && scheduler.getRMContainer(runningContainer) == null;
}
}
,1000,20000);
}
BooleanVerifier
// Drives the history writer with 10 apps x 10 attempts x 10 containers and
// verifies that every start/finish event is eventually handled and that all
// events of a given application are handled by a single handler.
@Test public void testParallelWrite() throws Exception {
List appIds=new ArrayList();
for (int i=0; i < 10; ++i) {
// Seeded Random keeps the generated application ids deterministic per run.
Random rand=new Random(i);
ApplicationId appId=ApplicationId.newInstance(0,rand.nextInt());
appIds.add(appId);
RMApp app=createRMApp(appId);
writer.applicationStarted(app);
for (int j=1; j <= 10; ++j) {
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,j);
RMAppAttempt appAttempt=createRMAppAttempt(appAttemptId);
writer.applicationAttemptStarted(appAttempt);
for (int k=1; k <= 10; ++k) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,k);
RMContainer container=createRMContainer(containerId);
writer.containerStarted(container);
writer.containerFinished(container);
}
writer.applicationAttemptFinished(appAttempt,RMAppAttemptState.FINISHED);
}
writer.applicationFinished(app,RMAppState.FINISHED);
}
// Expected total: two events (start + finish) per entity, i.e.
// 2*10*10*10 container + 2*10*10 attempt + 2*10 app events.
// Poll with sleeps since event handling is asynchronous.
for (int i=0; i < MAX_RETRIES; ++i) {
if (allEventsHandled(20 * 10 * 10 + 20 * 10 + 20)) {
break;
}
else {
Thread.sleep(500);
}
}
Assert.assertTrue(allEventsHandled(20 * 10 * 10 + 20 * 10 + 20));
// Each application's events must have been dispatched to exactly one handler.
for ( ApplicationId appId : appIds) {
Assert.assertTrue(handledByOne(appId));
}
}
BooleanVerifier
// Measures the overhead of the real application-history writer by timing the
// same massive-history workload twice: once with a no-op writer (baseline)
// and once with the default writer, asserting the overhead stays small.
@Test public void testRMWritingMassiveHistory() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
// Baseline RM: every history callback is overridden to do nothing.
MockRM rm=new MockRM(conf){
@Override protected RMApplicationHistoryWriter createRMApplicationHistoryWriter(){
return new RMApplicationHistoryWriter(){
@Override public void applicationStarted( RMApp app){
}
@Override public void applicationFinished( RMApp app, RMAppState finalState){
}
@Override public void applicationAttemptStarted( RMAppAttempt appAttempt){
}
@Override public void applicationAttemptFinished( RMAppAttempt appAttempt, RMAppAttemptState finalState){
}
@Override public void containerStarted( RMContainer container){
}
@Override public void containerFinished( RMContainer container){
}
}
;
}
}
;
long startTime1=System.currentTimeMillis();
testRMWritingMassiveHistory(rm);
long finishTime1=System.currentTimeMillis();
long elapsedTime1=finishTime1 - startTime1;
// Same workload with the default MockRM, i.e. the real history writer.
rm=new MockRM(conf);
long startTime2=System.currentTimeMillis();
testRMWritingMassiveHistory(rm);
long finishTime2=System.currentTimeMillis();
long elapsedTime2=finishTime2 - startTime2;
// Real writer may add at most ~10% over the no-op baseline.
// NOTE(review): wall-clock comparison — potentially flaky on loaded machines.
Assert.assertTrue(elapsedTime2 - elapsedTime1 < elapsedTime1 / 10);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies AllocateResponse response-id semantics: the id increments on each
// fresh request, repeats on a duplicate request (idempotent), and a stale id
// makes the RM ask the AM to resync.
@Test public void testARRMResponseId() throws Exception {
MockNM nm1=rm.registerNode("h1:1234",5000);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
AllocateRequest allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(1,response.getResponseId());
// assertNull reports the unexpected value on failure, unlike assertTrue(x == null).
Assert.assertNull(response.getAMCommand());
// Echoing the latest id advances the response id to 2.
allocateRequest=AllocateRequest.newInstance(response.getResponseId(),0F,null,null,null);
response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(2,response.getResponseId());
// Re-sending the identical request must not advance the id.
response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(2,response.getResponseId());
// A stale response id (0 while the RM is at 2) must trigger AM_RESYNC.
allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(AMCommand.AM_RESYNC,response.getAMCommand());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// On AM failure with work-preserving restart, RUNNING containers must be
// handed to the next attempt while ACQUIRED/ALLOCATED/RESERVED containers are
// released and surfaced to the new attempt as just-finished.
@Test(timeout=30000) public void testAMRestartWithExistingContainers() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
MockRM rm1=new MockRM(conf);
rm1.start();
// Trailing boolean presumably enables keep-containers-across-attempts —
// TODO confirm against MockRM.submitApp's parameter list.
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",10240,rm1.getResourceTrackerService());
nm1.registerNode();
MockNM nm2=new MockNM("127.0.0.1:2351",4089,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
int NUM_CONTAINERS=3;
// Allocate three containers (ids 2-4; id 1 is the AM container itself).
am1.allocate("127.0.0.1",1024,NUM_CONTAINERS,new ArrayList());
nm1.nodeHeartbeat(true);
List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (containers.size() != NUM_CONTAINERS) {
nm1.nodeHeartbeat(true);
containers.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(200);
}
// Launch containers 2 and 3 (RUNNING); container 4 stays ACQUIRED — handed
// to the AM but never launched on the NM.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
ContainerId containerId4=ContainerId.newInstance(am1.getApplicationAttemptId(),4);
rm1.waitForState(nm1,containerId4,RMContainerState.ACQUIRED);
// Container 5: allocated by the scheduler but never acquired by the AM.
am1.allocate("127.0.0.1",1024,1,new ArrayList());
nm1.nodeHeartbeat(true);
ContainerId containerId5=ContainerId.newInstance(am1.getApplicationAttemptId(),5);
rm1.waitForContainerAllocated(nm1,containerId5);
rm1.waitForState(nm1,containerId5,RMContainerState.ALLOCATED);
// Container 6: 6000MB request does not fit immediately, so it gets RESERVED.
am1.allocate("127.0.0.1",6000,1,new ArrayList());
ContainerId containerId6=ContainerId.newInstance(am1.getApplicationAttemptId(),6);
nm1.nodeHeartbeat(true);
SchedulerApplicationAttempt schedulerAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6);
while (schedulerAttempt.getReservedContainers().isEmpty()) {
System.out.println("Waiting for container " + containerId6 + " to be reserved.");
nm1.nodeHeartbeat(true);
Thread.sleep(200);
}
Assert.assertEquals(containerId6,schedulerAttempt.getReservedContainers().get(0).getContainerId());
// Fail the AM by completing its container; a new attempt should start.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
Thread.sleep(3000);
// RUNNING container 2 survives; ACQUIRED (4) and ALLOCATED (5) are dropped.
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4));
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5));
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
ApplicationAttemptId newAttemptId=app1.getCurrentAppAttempt().getAppAttemptId();
Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
// Register the second AM; it must be told about exactly the two surviving
// RUNNING containers (2 and 3) from the previous attempt.
RMAppAttempt attempt2=app1.getCurrentAppAttempt();
nm1.nodeHeartbeat(true);
MockAM am2=rm1.sendAMLaunched(attempt2.getAppAttemptId());
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
Assert.assertEquals(2,registerResponse.getContainersFromPreviousAttempts().size());
boolean containerId2Exists=false, containerId3Exists=false;
for ( Container container : registerResponse.getContainersFromPreviousAttempts()) {
if (container.getId().equals(containerId2)) {
containerId2Exists=true;
}
if (container.getId().equals(containerId3)) {
containerId3Exists=true;
}
}
Assert.assertTrue(containerId2Exists && containerId3Exists);
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Complete container 3; together with the dropped 4, 5 and 6, the new
// attempt should observe four just-finished containers.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
RMAppAttempt newAttempt=app1.getRMAppAttempt(am2.getApplicationAttemptId());
waitForContainersToFinish(4,newAttempt);
boolean container3Exists=false, container4Exists=false, container5Exists=false, container6Exists=false;
for ( ContainerStatus status : newAttempt.getJustFinishedContainers()) {
if (status.getContainerId().equals(containerId3)) {
container3Exists=true;
}
if (status.getContainerId().equals(containerId4)) {
container4Exists=true;
}
if (status.getContainerId().equals(containerId5)) {
container5Exists=true;
}
if (status.getContainerId().equals(containerId6)) {
container6Exists=true;
}
}
Assert.assertTrue(container3Exists && container4Exists && container5Exists&& container6Exists);
// Container 2 keeps running across the attempt switch; once the app finishes
// it must no longer be tracked as a live container of the new attempt.
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
SchedulerApplicationAttempt schedulerNewAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2);
MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am2);
Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2));
System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers());
waitForContainersToFinish(5,newAttempt);
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that NM tokens issued to earlier attempts are handed to each new
// AM attempt on registration, and that tokens accumulated across attempts are
// all transferred to the latest attempt.
@Test(timeout=30000) public void testNMTokensRebindOnAMRestart() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,3);
MockRM rm1=new MockRM(conf);
rm1.start();
// Trailing boolean presumably enables keep-containers-across-attempts —
// TODO confirm against MockRM.submitApp's parameter list.
RMApp app1=rm1.submitApp(200,"myname","myuser",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
MockNM nm2=new MockNM("127.1.1.1:4321",8000,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
List containers=new ArrayList();
List expectedNMTokens=new ArrayList();
// Attempt 1: allocate two containers on nm1, collecting the issued NM tokens.
while (true) {
AllocateResponse response=am1.allocate("127.0.0.1",2000,2,new ArrayList());
nm1.nodeHeartbeat(true);
containers.addAll(response.getAllocatedContainers());
expectedNMTokens.addAll(response.getNMTokens());
if (containers.size() == 2) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
// Launch containers 2 and 3, then fail the AM by completing container 1.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// Attempt 2 must receive exactly attempt 1's NM tokens at registration.
MockAM am2=MockRM.launchAM(app1,rm1,nm1);
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
Assert.assertEquals(expectedNMTokens,registerResponse.getNMTokensFromPreviousAttempts());
// Attempt 2 allocates on the second node, adding an NM token for nm2.
containers=new ArrayList();
while (true) {
AllocateResponse allocateResponse=am2.allocate("127.1.1.1",4000,1,new ArrayList());
nm2.nodeHeartbeat(true);
containers.addAll(allocateResponse.getAllocatedContainers());
expectedNMTokens.addAll(allocateResponse.getNMTokens());
if (containers.size() == 1) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
// Fail attempt 2 the same way.
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId am2ContainerId2=ContainerId.newInstance(am2.getApplicationAttemptId(),2);
rm1.waitForState(nm1,am2ContainerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am2.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// Attempt 3 must receive the union of tokens from both prior attempts
// (one per node, hence size 2).
MockAM am3=MockRM.launchAM(app1,rm1,nm1);
registerResponse=am3.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
List transferredTokens=registerResponse.getNMTokensFromPreviousAttempts();
Assert.assertEquals(2,transferredTokens.size());
Assert.assertTrue(transferredTokens.containsAll(expectedNMTokens));
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that an AM failure caused by preemption does not count towards the
// max-attempt limit, and that the PREEMPTED exit status survives an RM
// restart so the app gets a fresh attempt on the new RM.
@Test(timeout=20000) public void testPreemptedAMRestartOnRMRestart() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
// max-attempts=1: the second attempt can only launch because the preempted
// first attempt is not counted against this limit.
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
// Preempt the AM container through the scheduler.
CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
scheduler.killContainer(scheduler.getRMContainer(amContainer));
am1.waitForState(RMAppAttemptState.FAILED);
// Idiom: assertFalse(x) instead of assertTrue(!x).
Assert.assertFalse(attempt1.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// The stored attempt must carry the PREEMPTED exit status.
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
Assert.assertEquals(1,appState.getAttemptCount());
Assert.assertEquals(ContainerExitStatus.PREEMPTED,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
// Restart the RM; a second attempt must launch and run to completion, and
// its (successful) attempt counts normally.
MockRM rm2=new MockRM(conf,memStore);
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm1.registerNode();
rm2.start();
MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
RMAppAttempt attempt2=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
Assert.assertTrue(attempt2.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.stop();
rm2.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=100000) public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
scheduler.killContainer(scheduler.getRMContainer(amContainer));
am1.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
MockAM am2=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
RMAppAttempt attempt2=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt2).mayBeLastAttempt());
ContainerId amContainer2=ContainerId.newInstance(am2.getApplicationAttemptId(),1);
scheduler.killContainer(scheduler.getRMContainer(amContainer2));
am2.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt2.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am3=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),3,nm1);
RMAppAttempt attempt3=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt3).mayBeLastAttempt());
ContainerStatus containerStatus=Records.newRecord(ContainerStatus.class);
containerStatus.setContainerId(attempt3.getMasterContainer().getId());
containerStatus.setDiagnostics("mimic NM disk_failure");
containerStatus.setState(ContainerState.COMPLETE);
containerStatus.setExitStatus(ContainerExitStatus.DISKS_FAILED);
Map> conts=new HashMap>();
conts.put(app1.getApplicationId(),Collections.singletonList(containerStatus));
nm1.nodeHeartbeat(conts,true);
am3.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt3.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.DISKS_FAILED,appState.getAttempt(am3.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am4=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),4,nm1);
RMAppAttempt attempt4=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt4).mayBeLastAttempt());
MockNM nm2=new MockNM("127.0.0.1:2234",8000,rm1.getResourceTrackerService());
nm2.registerNode();
nm1.nodeHeartbeat(false);
am4.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt4.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.ABORTED,appState.getAttempt(am4.getApplicationAttemptId()).getAMContainerExitStatus());
nm2.nodeHeartbeat(true);
MockAM am5=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),5,nm2);
RMAppAttempt attempt5=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt5).mayBeLastAttempt());
nm2.nodeHeartbeat(am5.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am5.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(attempt5.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
Assert.assertEquals(5,app1.getAppAttempts().size());
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that an AM container reported as KILLED_BY_RESOURCEMANAGER (e.g.
// during RM restart/failover) does not count towards the max-attempt limit,
// so the app still gets a fresh attempt on the new RM despite max-attempts=1.
@Test(timeout=50000) public void testRMRestartOrFailoverNotCountedForAMFailures() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt1).mayBeLastAttempt());
// Second RM takes over from the same store; the NM re-registers reporting
// the old AM container as COMPLETE with KILLED_BY_RESOURCEMANAGER.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
NMContainerStatus status=Records.newRecord(NMContainerStatus.class);
status.setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER);
status.setContainerId(attempt1.getMasterContainer().getId());
status.setContainerState(ContainerState.COMPLETE);
status.setDiagnostics("");
nm1.registerNode(Collections.singletonList(status),null);
rm2.waitForState(attempt1.getAppAttemptId(),RMAppAttemptState.FAILED);
// The RM-initiated kill must be recorded with its exit status in the store.
Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// A second attempt launches (first one was not counted) and finishes cleanly.
MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
RMAppAttempt attempt3=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
Assert.assertTrue(attempt3.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.stop();
rm2.stop();
}
BooleanVerifier
// Verifies that FileSystemRMStateStore retries a write while the NameNode is
// down and succeeds once it is restarted, without surfacing an error.
@Test(timeout=30000) public void testFSRMStateStoreClientRetry() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
try {
TestFSRMStateStoreTester fsTester=new TestFSRMStateStoreTester(cluster);
final RMStateStore store=fsTester.getRMStateStore();
store.setRMDispatcher(new TestDispatcher());
final AtomicBoolean assertionFailedInThread=new AtomicBoolean(false);
cluster.shutdownNameNodes();
Thread clientThread=new Thread(){
@Override public void run(){
try {
store.storeApplicationStateInternal(ApplicationId.newInstance(100L,1),ApplicationStateData.newInstance(111,111,"user",null,RMAppState.ACCEPTED,"diagnostics",333));
}
catch ( Exception e) {
// The only tolerated failure is HDFS refusing replication while no
// datanode can take the block; anything else fails the test.
if (!e.getMessage().contains("could only be replicated" + " to 0 nodes instead of minReplication (=1)")) {
assertionFailedInThread.set(true);
}
e.printStackTrace();
}
}
}
;
// NOTE(review): the sleep runs before the writer thread starts, so the write
// races with the NameNode restart below — presumably intended to exercise
// the retry path; confirm the ordering against the upstream test.
Thread.sleep(2000);
clientThread.start();
cluster.restartNameNode();
clientThread.join();
Assert.assertFalse(assertionFailedInThread.get());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
// End-to-end exercise of FileSystemRMStateStore against a MiniDFSCluster:
// plants a leftover ".tmp" attempt file, runs the standard state-store test
// battery, and checks the temp file was cleaned up along the way.
@Test(timeout=60000) public void testFSRMStateStore() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
fsTester=new TestFSRMStateStoreTester(cluster);
FSDataOutputStream fsOut=null;
FileSystemRMStateStore fileSystemRMStateStore=(FileSystemRMStateStore)fsTester.getRMStateStore();
// Simulate a crash mid-write: leave a stray .tmp attempt file in the
// application's directory before running the store tests.
String appAttemptIdStr3="appattempt_1352994193343_0001_000003";
ApplicationAttemptId attemptId3=ConverterUtils.toApplicationAttemptId(appAttemptIdStr3);
Path appDir=fsTester.store.getAppDir(attemptId3.getApplicationId().toString());
Path tempAppAttemptFile=new Path(appDir,attemptId3.toString() + ".tmp");
fsOut=fileSystemRMStateStore.fs.create(tempAppAttemptFile,false);
fsOut.write("Some random data ".getBytes());
fsOut.close();
testRMAppStateStore(fsTester);
// The store must have removed the stale temp file during app-state handling.
Assert.assertFalse(fsTester.workingDirPathURI.getFileSystem(conf).exists(tempAppAttemptFile));
testRMDTSecretManagerStateStore(fsTester);
testCheckVersion(fsTester);
testEpoch(fsTester);
testAppDeletion(fsTester);
testDeleteStore(fsTester);
testAMRMTokenSecretManagerStateStore(fsTester);
}
finally {
cluster.shutdown();
}
}
BooleanVerifier
// Verifies that ZKRMStateStore retries a read while the ZK server is down and
// succeeds once the server comes back, instead of failing outright.
@Test(timeout=20000) public void testZKClientRetry() throws Exception {
TestZKClient zkClientTester=new TestZKClient();
final String path="/test";
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS);
conf.setLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS,100);
final ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher=new TestDispatcher();
store.setRMDispatcher(dispatcher);
final AtomicBoolean assertionFailedInThread=new AtomicBoolean(false);
stopServer();
Thread clientThread=new Thread(){
@Override public void run(){
try {
store.getDataWithRetries(path,true);
}
catch ( Exception e) {
e.printStackTrace();
assertionFailedInThread.set(true);
}
}
}
;
// Fix: the thread was never started, so join() returned immediately and the
// retry path was never exercised (the test passed vacuously). Start it while
// the server is down so the store must retry, then bring the server back.
clientThread.start();
Thread.sleep(2000);
startServer();
clientThread.join();
Assert.assertFalse(assertionFailedInThread.get());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies ZKRMStateStore across a ZK server outage: operations fail with a
// client-creation timeout while the server is down, and the session recovers
// (data intact) once the server returns.
@Test(timeout=20000) public void testZKClientDisconnectAndReconnect() throws Exception {
TestZKClient zkClientTester=new TestZKClient();
String path="/test";
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS);
ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher=new TestDispatcher();
store.setRMDispatcher(dispatcher);
// Seed the znode and update it so there is known data to read back later.
store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
store.getDataWithRetries(path,true);
store.setDataWithRetries(path,"newBytes".getBytes(),0);
// While the server is down, a read must time out waiting for a client.
stopServer();
zkClientTester.watcher.waitForDisconnected(ZK_OP_WAIT_TIME);
try {
store.getDataWithRetries(path,true);
fail("Expected ZKClient time out exception");
}
catch ( Exception e) {
assertTrue(e.getMessage().contains("Wait for ZKClient creation timed out"));
}
// After the server restarts, the session must recover and the previously
// written data must still be readable.
startServer();
zkClientTester.watcher.waitForConnected(ZK_OP_WAIT_TIME);
byte[] ret=null;
try {
ret=store.getDataWithRetries(path,true);
}
catch ( Exception e) {
String error="ZKRMStateStore Session restore failed";
LOG.error(error,e);
fail(error);
}
assertEquals("newBytes",new String(ret));
}
BooleanVerifier
// fitsIn is true only when every dimension of the first resource is <= the
// corresponding dimension of the second.
@Test(timeout=1000) public void testFitsIn(){
Resource small=createResource(1,1);
Resource big=createResource(2,2);
assertTrue(fitsIn(small,big));
assertTrue(fitsIn(big,big));
assertFalse(fitsIn(big,small));
// Mixed dominance (each bigger in one dimension) must not fit either way.
assertFalse(fitsIn(createResource(1,2),createResource(2,1)));
assertFalse(fitsIn(createResource(2,1),createResource(1,2)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies heartbeat response-id handshaking between NM and RM: each accepted
// heartbeat increments the RM-side response id, a duplicate heartbeat (stale
// id) is answered from the cached response, and a far-behind id triggers a
// RESYNC with a diagnostic message.
// Fixes: removed an identical, never-registered duplicate request object
// (dead code) and replaced assertTrue(x == y) with assertEquals for
// informative failure messages.
@Test public void testRPCResponseId() throws IOException, YarnException {
String node="localhost";
Resource capability=BuilderUtils.newResource(1024,1);
nodeId=NodeId.newInstance(node,1234);
// Register the node manager.
RegisterNodeManagerRequest request1=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
request1.setNodeId(nodeId);
request1.setHttpPort(0);
request1.setResource(capability);
resourceTrackerService.registerNodeManager(request1);
// Build a healthy node status to heartbeat with.
org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus=recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
nodeStatus.setNodeId(nodeId);
NodeHealthStatus nodeHealthStatus=recordFactory.newRecordInstance(NodeHealthStatus.class);
nodeHealthStatus.setIsNodeHealthy(true);
nodeStatus.setNodeHealthStatus(nodeHealthStatus);
NodeHeartbeatRequest nodeHeartBeatRequest=recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
nodeHeartBeatRequest.setNodeStatus(nodeStatus);
// First heartbeat (id 0) is accepted; RM response id becomes 1.
nodeStatus.setResponseId(0);
NodeHeartbeatResponse response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
Assert.assertEquals(1,response.getResponseId());
// Echoing the latest id is accepted; RM response id becomes 2.
nodeStatus.setResponseId(response.getResponseId());
response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
Assert.assertEquals(2,response.getResponseId());
// Re-sending the same (now stale) id gets the cached response; id stays 2.
response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
Assert.assertEquals(2,response.getResponseId());
// A heartbeat too far behind forces the NM to resync.
nodeStatus.setResponseId(0);
response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
Assert.assertEquals(NodeAction.RESYNC,response.getNodeAction());
Assert.assertEquals("Too far behind rm response id:2 nm response id:0",response.getDiagnosticsMessage());
}
APIUtilityVerifier BooleanVerifier
// A finished application must carry the diagnostics supplied at finish time.
@Test public void testAppSuccessPath() throws IOException {
LOG.info("--- START: testAppSuccessPath ---");
final String diagMsg="some diagnostics";
RMApp application=testCreateAppFinished(null,diagMsg);
boolean hasDiagnostics=application.getDiagnostics().indexOf(diagMsg) != -1;
Assert.assertTrue("Finished application missing diagnostics",hasDiagnostics);
}
BooleanVerifier
// An application in ACCEPTED may lose up to maxAppAttempts-1 attempts and be
// rescheduled each time; failure of the final allowed attempt moves it to
// FAILED with the failure message in its diagnostics.
@Test public void testAppAcceptedFailed() throws IOException {
LOG.info("--- START: testAppAcceptedFailed ---");
RMApp application=testCreateAppAccepted(null);
// The test only makes sense when more than one attempt is allowed.
Assert.assertTrue(maxAppAttempts > 1);
for (int i=1; i < maxAppAttempts; i++) {
// Each non-final attempt failure keeps the app in ACCEPTED; a new attempt
// is accepted in its place.
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
assertAppState(RMAppState.ACCEPTED,application);
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_ACCEPTED);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.ACCEPTED,application);
}
// Failure of the last allowed attempt fails the whole application.
String message="Test fail";
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,message,false);
application.handle(event);
rmDispatcher.await();
sendAppUpdateSavedEvent(application);
assertFailed(application,".*" + message + ".*Failing the application.*");
assertAppFinalStateSaved(application);
verifyApplicationFinished(RMAppState.FAILED);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A RUNNING app whose attempt fails is rescheduled (new attempt id) until
// maxAppAttempts is exhausted, then transitions to FAILED; a KILL arriving
// after the final failure must not change the outcome.
@Test public void testAppRunningFailed() throws IOException {
LOG.info("--- START: testAppRunningFailed ---");
RMApp application=testCreateAppRunning(null);
RMAppAttempt appAttempt=application.getCurrentAppAttempt();
int expectedAttemptId=1;
Assert.assertEquals(expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId());
Assert.assertTrue(maxAppAttempts > 1);
for (int i=1; i < maxAppAttempts; i++) {
// Fail the current attempt: the app drops back to ACCEPTED and a fresh
// attempt with an incremented id is created...
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.ACCEPTED,application);
appAttempt=application.getCurrentAppAttempt();
Assert.assertEquals(++expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId());
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_ACCEPTED);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.ACCEPTED,application);
// ...and the app runs again once the new attempt registers.
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_REGISTERED);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.RUNNING,application);
}
// Failure of the final attempt fails the application for good.
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
sendAppUpdateSavedEvent(application);
assertFailed(application,".*Failing the application.*");
assertAppFinalStateSaved(application);
// A KILL after the app has already failed is ignored.
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL);
application.handle(event);
rmDispatcher.await();
assertFailed(application,".*Failing the application.*");
assertAppFinalStateSaved(application);
verifyApplicationFinished(RMAppState.FAILED);
}
InternalCallVerifier BooleanVerifier
// FINAL_SAVING -> FINISHED: the app stays in FINAL_SAVING until the state
// store acknowledges (APP_UPDATE_SAVED), then finishes and keeps the
// diagnostics carried by the finished-attempt event.
@Test public void testAppFinalSavingToFinished() throws IOException {
LOG.info("--- START: testAppFinalSavingToFinished ---");
RMApp application=testCreateAppFinalSaving(null);
final String diagMsg="some diagnostics";
// A finished-attempt event while saving must not leave FINAL_SAVING yet.
RMAppEvent event=new RMAppFinishedAttemptEvent(application.getApplicationId(),diagMsg);
application.handle(event);
assertAppState(RMAppState.FINAL_SAVING,application);
RMAppEvent appUpdated=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_UPDATE_SAVED);
application.handle(appUpdated);
assertAppState(RMAppState.FINISHED,application);
assertTimesAtFinish(application);
// NOTE(review): final status asserted as FAILED — presumably because the AM
// never unregistered in this scenario; confirm against
// testCreateAppFinalSaving's setup.
assertFinalAppStatus(FinalApplicationStatus.FAILED,application);
Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().indexOf(diagMsg) != -1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Unmanaged-AM lifecycle: the success path keeps diagnostics like a managed
// app, but a single attempt failure is terminal — no new attempt is created.
@Test public void testUnmanagedApp() throws IOException {
ApplicationSubmissionContext subContext=new ApplicationSubmissionContextPBImpl();
subContext.setUnmanagedAM(true);
// Success path: a finished unmanaged app retains its diagnostics.
LOG.info("--- START: testUnmanagedAppSuccessPath ---");
final String diagMsg="some diagnostics";
RMApp application=testCreateAppFinished(subContext,diagMsg);
Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().indexOf(diagMsg) != -1);
// Failure path: one attempt failure fails the whole application.
reset(writer);
LOG.info("--- START: testUnmanagedAppFailPath ---");
application=testCreateAppRunning(subContext);
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
RMAppAttempt appAttempt=application.getCurrentAppAttempt();
// No retry: the attempt id is still 1 after the failure.
Assert.assertEquals(1,appAttempt.getAppAttemptId().getAttemptId());
sendAppUpdateSavedEvent(application);
assertFailed(application,".*Unmanaged application.*Failing the application.*");
assertAppFinalStateSaved(application);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// When this is the last allowed attempt (maxAppAttempts == 1), a failed
// attempt must NOT transfer its state/containers to a successor, even though
// keep-containers-across-attempts is enabled.
@Test public void testContainersCleanupForLastAttempt(){
applicationAttempt=new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(),rmContext,scheduler,masterService,submissionContext,new Configuration(),true);
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
when(submissionContext.getMaxAppAttempts()).thenReturn(1);
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
// The AM container itself completes with an error while RUNNING.
ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
// Last attempt: no state transfer to a next attempt.
assertFalse(transferStateFromPreviousAttempt);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// With keep-containers enabled (and retries available), a failed attempt
// transfers its state, and containers finishing after the attempt already
// FAILED are still recorded as just-finished containers.
@Test public void testFailedToFailed(){
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
// Fail the attempt by completing its AM container with an error.
ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
// State transfers to the next attempt because containers are kept.
assertTrue(transferStateFromPreviousAttempt);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
// A container finishing after FAILED is still tracked by this attempt.
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
ContainerStatus cs2=ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId,2),ContainerState.COMPLETE,"",0);
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs2));
assertEquals(1,applicationAttempt.getJustFinishedContainers().size());
assertEquals(cs2.getContainerId(),applicationAttempt.getJustFinishedContainers().get(0).getContainerId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An EXPIRE event while the attempt is LAUNCHED fails it: diagnostics must
// mention the timeout and both tracking URLs fall back to the RM app page.
@Test(timeout=10000) public void testLaunchedExpire(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
// After failure both tracking URLs point at the RM's application page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An EXPIRE event while the attempt is RUNNING fails it: diagnostics mention
// the timeout, tracking URLs fall back to the RM app page, and the recorded
// AM host/port are invalidated.
@Test(timeout=20000) public void testRunningExpire(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
// After failure both tracking URLs point at the RM's application page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An unmanaged AM attempt must not transfer state from a previous attempt
// after being saved, even with keep-containers-across-attempts enabled.
@Test public void testUnmanagedAMContainersCleanup(){
unmanagedAM=true;
when(submissionContext.getUnmanagedAM()).thenReturn(true);
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
submitApplicationAttempt();
// Unmanaged AMs register directly from SUBMITTED (no allocate/launch phase).
applicationAttempt.handle(new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(),"host",8042,"oldtrackingurl"));
assertEquals(YarnApplicationAttemptState.SUBMITTED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertFalse(transferStateFromPreviousAttempt);
}
UtilityVerifier BooleanVerifier HybridVerifier
// The last boolean of checkApps toggles a strict single-metric check: with
// true the check passes, while false must fail with "Expected exactly one
// metric for name" when the name resolves to more than one metric.
@Test public void testCollectAllMetrics(){
String queueName="single";
QueueMetrics.forQueue(ms,queueName,null,false,conf);
MetricsSource queueSource=queueSource(ms,queueName);
checkApps(queueSource,0,0,0,0,0,0,true);
try {
// Strict (single-metric) mode is expected to fail here.
checkApps(queueSource,0,0,0,0,0,0,false);
Assert.fail();
}
catch ( AssertionError e) {
Assert.assertTrue(e.getMessage().contains("Expected exactly one metric for name "));
}
// Tolerant mode still passes after the failed strict check.
checkApps(queueSource,0,0,0,0,0,0,true);
}
BooleanVerifier
// Priority comparison reverses natural int ordering: the numerically smaller
// value (1) compares greater than the larger value (2), i.e. it ranks higher.
@Test public void testComparePriorities(){
Priority higherPriority=Priority.newInstance(1);
Priority lowerPriority=Priority.newInstance(2);
int comparison=higherPriority.compareTo(lowerPriority);
assertTrue(comparison > 0);
}
BooleanVerifier
@Test public void testValidateResourceBlacklistRequest() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(new YarnConfiguration(),containerManager);
rm.start();
MockNM nm1=rm.registerNode("localhost:1234",5120);
Map acls=new HashMap(2);
acls.put(ApplicationAccessType.VIEW_APP,"*");
RMApp app=rm.submitApp(1024,"appname","appuser",acls);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
waitForLaunchedState(attempt);
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,rmBindAddress,conf);
}
}
);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
client.registerApplicationMaster(request);
ResourceBlacklistRequest blacklistRequest=ResourceBlacklistRequest.newInstance(Collections.singletonList(ResourceRequest.ANY),null);
AllocateRequest allocateRequest=AllocateRequest.newInstance(0,0.0f,null,null,blacklistRequest);
boolean error=false;
try {
client.allocate(allocateRequest);
}
catch ( InvalidResourceBlacklistRequestException e) {
error=true;
}
rm.stop();
Assert.assertTrue("Didn't not catch InvalidResourceBlacklistRequestException",error);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Active-application limit bookkeeping when apps finish from both the pending
// and the active lists. The queue allows at most 2 active applications;
// further submissions queue up as pending.
@Test public void testActiveLimitsWithKilledApps() throws Exception {
final String user_0="user_0";
int APPLICATION_ID=0;
doReturn(2).when(queue).getMaximumActiveApplications();
// First two submissions become active immediately.
FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_0,user_0);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_0));
FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_1,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_1));
// Third and fourth submissions exceed the limit and stay pending.
FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_2,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_2));
FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_3,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(2,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(2,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_3));
// Finishing a PENDING app (e.g. killed before activation) removes it from
// pending without activating anything else.
queue.finishApplicationAttempt(app_2,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertFalse(queue.pendingApplications.contains(app_2));
assertFalse(queue.activeApplications.contains(app_2));
// Finishing an ACTIVE app frees a slot, so pending app_3 is activated.
queue.finishApplicationAttempt(app_0,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_3));
assertFalse(queue.pendingApplications.contains(app_3));
assertFalse(queue.activeApplications.contains(app_0));
// Drain the remaining active apps down to zero.
queue.finishApplicationAttempt(app_1,A);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_1));
queue.finishApplicationAttempt(app_3,A);
assertEquals(0,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(0,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Moving an application between sibling leaf queues under the same parent
// (a1 -> a2): leaf-level membership moves while the parent ("a") and root
// views keep containing the attempt.
// Fixes: assertTrue(queue.equals(...)) -> assertEquals for informative
// failure messages; restored the stripped List<ApplicationAttemptId>
// generics (getAppsInQueue returns attempt ids, as the contains() checks
// against appAttemptId show).
@Test public void testMoveAppSameParent() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Before the move: the app lives in a1, visible through a and root.
List<ApplicationAttemptId> appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertEquals("a1",queue);
List<ApplicationAttemptId> appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List<ApplicationAttemptId> appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List<ApplicationAttemptId> appsInA2=scheduler.getAppsInQueue("a2");
assertTrue(appsInA2.isEmpty());
scheduler.moveApplication(app.getApplicationId(),"a2");
// After the move: the app lives in a2; a and root unchanged; a1 empty.
appsInA2=scheduler.getAppsInQueue("a2");
assertEquals(1,appsInA2.size());
queue=scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName();
Assert.assertEquals("a2",queue);
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// moveAllApps from a1 to b1: the app's membership must move across parents
// (a -> b) while remaining visible at root; the source queues end up empty.
// Fixes: assertTrue(queue.equals(...)) -> assertEquals; restored stripped
// List<ApplicationAttemptId> generics.
@Test public void testMoveAllApps() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Baseline: app in a1 / a / root, b-side queues empty.
List<ApplicationAttemptId> appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List<ApplicationAttemptId> appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertEquals("a1",queue);
List<ApplicationAttemptId> appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List<ApplicationAttemptId> appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List<ApplicationAttemptId> appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveAllApps("a1","b1");
// NOTE(review): fixed sleep presumably lets the async move be processed;
// a state-polling wait would be less flaky — confirm before changing.
Thread.sleep(1000);
// After the move: app in b1 / b / root; a-side queues empty.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertEquals("b1",queue);
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// killAllAppsInQueue("a1") must kill the application and remove it from
// every level of the queue hierarchy (leaf, parent, root).
// Fixes: assertTrue(queue.equals(...)) -> assertEquals; restored stripped
// List<ApplicationAttemptId> generics.
@Test public void testKillAllAppsInQueue() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Baseline: app visible in a1 / a / root.
List<ApplicationAttemptId> appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List<ApplicationAttemptId> appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertEquals("a1",queue);
List<ApplicationAttemptId> appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
scheduler.killAllAppsInQueue("a1");
rm.waitForState(app.getApplicationId(),RMAppState.KILLED);
// The killed app disappears from root, leaf, and parent views.
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.isEmpty());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// moveApplication from a1 to b1: the single app's membership moves across
// parents (a -> b), stays visible at root, and the source queues empty out.
// Fixes: assertTrue(queue.equals(...)) -> assertEquals; restored stripped
// List<ApplicationAttemptId> generics.
@Test public void testMoveAppBasic() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Baseline: app in a1 / a / root, b-side queues empty.
List<ApplicationAttemptId> appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertEquals("a1",queue);
List<ApplicationAttemptId> appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List<ApplicationAttemptId> appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List<ApplicationAttemptId> appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List<ApplicationAttemptId> appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveApplication(app.getApplicationId(),"b1");
// After the move: app in b1 / b / root; a-side queues empty.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertEquals("b1",queue);
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
InternalCallVerifier BooleanVerifier
// The CapacityScheduler application comparator orders applications by their
// ApplicationId; these samples are consistent with a timestamp-then-sequence
// ordering ((1,1) < (1,2) < (2,1)).
@Test(timeout=5000) public void testApplicationComparator(){
CapacityScheduler cs=new CapacityScheduler();
Comparator appComparator=cs.getApplicationComparator();
ApplicationId firstId=ApplicationId.newInstance(1,1);
ApplicationId secondId=ApplicationId.newInstance(1,2);
ApplicationId thirdId=ApplicationId.newInstance(2,1);
// Mock applications that only expose their ids.
FiCaSchedulerApp firstApp=Mockito.mock(FiCaSchedulerApp.class);
when(firstApp.getApplicationId()).thenReturn(firstId);
FiCaSchedulerApp secondApp=Mockito.mock(FiCaSchedulerApp.class);
when(secondApp.getApplicationId()).thenReturn(secondId);
FiCaSchedulerApp thirdApp=Mockito.mock(FiCaSchedulerApp.class);
when(thirdApp.getApplicationId()).thenReturn(thirdId);
// The ordering holds pairwise across all three samples.
assertTrue(appComparator.compare(firstApp,secondApp) < 0);
assertTrue(appComparator.compare(firstApp,thirdApp) < 0);
assertTrue(appComparator.compare(secondApp,thirdApp) < 0);
}
UtilityVerifier BooleanVerifier HybridVerifier
// Scheduler reinitialization must reject configurations where the minimum
// allocation exceeds the maximum, for both memory and vcores.
@Test(timeout=30000) public void testConfValidation() throws Exception {
ResourceScheduler scheduler=new CapacityScheduler();
scheduler.setRMContext(resourceManager.getRMContext());
// min memory (2048) > max memory (1024): must be rejected.
Configuration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,2048);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,1024);
try {
scheduler.reinitialize(conf,mockContext);
fail("Exception is expected because the min memory allocation is" + " larger than the max memory allocation.");
}
catch ( YarnRuntimeException e) {
assertTrue("The thrown exception is not the expected one.",e.getMessage().startsWith("Invalid resource scheduler memory"));
}
// min vcores (2) > max vcores (1): must be rejected.
conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,2);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,1);
try {
scheduler.reinitialize(conf,mockContext);
fail("Exception is expected because the min vcores allocation is" + " larger than the max vcores allocation.");
}
catch ( YarnRuntimeException e) {
assertTrue("The thrown exception is not the expected one.",e.getMessage().startsWith("Invalid resource scheduler vcores"));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// After the RM preempts (kills) a container, the container's original
// resource requests must be recovered so the application can be given a
// replacement container.
// Fixes: assertTrue(size() == 1) -> assertEquals for informative failure
// messages; restored stripped generics on List/ArrayList (the for-each over
// ResourceRequest requires a typed list to compile).
@Test(timeout=30000) public void testRecoverRequestAfterPreemption() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
MockRM rm1=new MockRM(conf);
rm1.start();
MockNM nm1=rm1.registerNode("127.0.0.1:1234",8000);
RMApp app1=rm1.submitApp(1024);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
CapacityScheduler cs=(CapacityScheduler)rm1.getResourceScheduler();
// Ask for one node-local container and wait for it to be allocated.
am1.allocate("127.0.0.1",1024,1,new ArrayList<ContainerId>());
ContainerId containerId1=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId1,RMContainerState.ALLOCATED);
RMContainer rmContainer=cs.getRMContainer(containerId1);
List<ResourceRequest> requests=rmContainer.getResourceRequests();
FiCaSchedulerApp app=cs.getApplicationAttempt(am1.getApplicationAttemptId());
FiCaSchedulerNode node=cs.getNode(rmContainer.getAllocatedNode());
// Once allocated, the node-level request has been consumed (null); only
// the rack-level and ANY requests still exist on the app.
for ( ResourceRequest request : requests) {
if (request.getResourceName().equals(node.getRackName()) || request.getResourceName().equals(ResourceRequest.ANY)) {
continue;
}
Assert.assertNull(app.getResourceRequest(request.getPriority(),request.getResourceName()));
}
// Preempt the container: all three requests (node, rack, ANY) must be
// restored with one outstanding container each.
cs.killContainer(rmContainer);
Assert.assertEquals(3,requests.size());
for ( ResourceRequest request : requests) {
Assert.assertEquals(1,app.getResourceRequest(request.getPriority(),request.getResourceName()).getNumContainers());
}
// The recovered request results in a replacement container being allocated.
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
List<Container> containers=am1.allocate(new ArrayList<ResourceRequest>(),new ArrayList<ContainerId>()).getAllocatedContainers();
Assert.assertEquals(1,containers.size());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// moveAllApps to a nonexistent destination queue must throw YarnException
// and leave all queue memberships untouched.
@Test public void testMoveAllAppsInvalidDestination() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Baseline: app in a1 / a / root, b-side queues empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
try {
scheduler.moveAllApps("a1","DOES_NOT_EXIST");
Assert.fail();
}
catch ( YarnException e) {
// Expected: the destination queue does not exist.
}
// Nothing moved: memberships are exactly as before the failed call.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// allocate() carries blacklist additions/removals: adding a host blacklists
// it for the attempt, and a later call listing it in removals clears it.
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
CapacityScheduler cs=(CapacityScheduler)rm.getResourceScheduler();
String host="127.0.0.1";
RMNode node=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,host);
cs.handle(new NodeAddedSchedulerEvent(node));
// Register an application and attempt directly via scheduler events.
ApplicationId appId=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
SchedulerEvent addAppEvent=new AppAddedSchedulerEvent(appId,"default","user");
cs.handle(addAppEvent);
SchedulerEvent addAttemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
cs.handle(addAttemptEvent);
// Blacklist the host (4th argument = blacklist additions).
cs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
Assert.assertTrue(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
// Un-blacklist it (5th argument = blacklist removals).
cs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
Assert.assertFalse(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
rm.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// moveAllApps from a nonexistent source queue must throw YarnException and
// leave all queue memberships untouched.
@Test public void testMoveAllAppsInvalidSource() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Baseline: app in a1 / a / root, b-side queues empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
try {
scheduler.moveAllApps("DOES_NOT_EXIST","b1");
Assert.fail();
}
catch ( YarnException e) {
// Expected: the source queue does not exist.
}
// Nothing moved: memberships are exactly as before the failed call.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
rm.stop();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies getAppsInQueue(): leaf queues report only their own apps, parent
// queues aggregate their children, and an unknown queue name yields null.
@Test public void testGetAppsInQueue() throws Exception {
// Two apps under parent "a" (a1, a2) and one under parent "b" (b2).
Application application_0=new Application("user_0","a1",resourceManager);
application_0.submit();
Application application_1=new Application("user_0","a2",resourceManager);
application_1.submit();
Application application_2=new Application("user_0","b2",resourceManager);
application_2.submit();
ResourceScheduler scheduler=resourceManager.getResourceScheduler();
// Leaf a1 sees exactly one app.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
// Parent "a" aggregates both of its leaves' apps.
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(application_0.getApplicationAttemptId()));
assertTrue(appsInA.contains(application_1.getApplicationAttemptId()));
assertEquals(2,appsInA.size());
// Root aggregates everything in the hierarchy.
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(application_0.getApplicationAttemptId()));
assertTrue(appsInRoot.contains(application_1.getApplicationAttemptId()));
assertTrue(appsInRoot.contains(application_2.getApplicationAttemptId()));
assertEquals(3,appsInRoot.size());
// Unknown queue name: null, not an empty list.
Assert.assertNull(scheduler.getAppsInQueue("nonexistentqueue"));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that killAllAppsInQueue() on a non-existent queue throws a
// YarnException and does not disturb any running app's queue membership.
@Test public void testKillAllAppsInvalidSource() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
// Submit one app into a1 and record its membership in a1, a and root.
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
// Killing apps in a bogus queue must fail with YarnException.
try {
scheduler.killAllAppsInQueue("DOES_NOT_EXIST");
Assert.fail();
}
catch ( YarnException e) {
// expected: invalid queue name
}
// State after the failed kill must match the baseline exactly.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies reservation behavior when a request (3GB) exceeds the capacity of
// one NM (nm1: 2GB) but fits on another (nm2: 3GB): heartbeating nm1 must
// allocate nothing, while heartbeating nm2 satisfies the request.
@Test(timeout=3000000) public void testExcessReservationThanNodeManagerCapacity() throws Exception {
MockRM rm=new MockRM(conf);
rm.start();
// nm1 is too small for the 3GB request; nm2 can host it.
MockNM nm1=rm.registerNode("127.0.0.1:1234",2 * GB,4);
MockNM nm2=rm.registerNode("127.0.0.1:2234",3 * GB,4);
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
// Poll until both nodes are registered with the RM (bounded wait).
// Note: 'size' is assigned in the loop condition before every use; the
// previous dead-store initializer has been removed.
int waitCount=20;
int size;
while ((size=rm.getRMContext().getRMNodes().size()) != 2 && waitCount-- > 0) {
LOG.info("Waiting for node managers to register : " + size);
Thread.sleep(100);
}
Assert.assertEquals(2,rm.getRMContext().getRMNodes().size());
RMApp app1=rm.submitApp(128);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
LOG.info("sending container requests ");
// One 3GB container request, anywhere in the cluster.
am1.addRequests(new String[]{"*"},3 * GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
nm1.nodeHeartbeat(true);
int waitCounter=20;
LOG.info("heartbeating nm1");
while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(500);
alloc1Response=am1.schedule();
}
LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
// nm1 (2GB) cannot fit the 3GB request, so nothing may be allocated yet.
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0);
LOG.info("heartbeating nm2");
waitCounter=20;
nm2.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(500);
alloc1Response=am1.schedule();
}
LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
// nm2 (3GB) can host the request: exactly one container allocated.
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies SUBMIT_APPLICATIONS ACL inheritance: access granted on leaves a/b,
// denied on root, parent c and its leaf c1, and that getQueueUserAclInfo()
// reports the same decisions as hasAccess().
@Test public void testInheritedQueueAcls() throws IOException {
UserGroupInformation user=UserGroupInformation.getCurrentUser();
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
LeafQueue b=stubLeafQueue((LeafQueue)queues.get(B));
ParentQueue c=(ParentQueue)queues.get(C);
LeafQueue c1=stubLeafQueue((LeafQueue)queues.get(C1));
// Direct ACL checks per queue.
assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(a.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(b.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
// The user-facing ACL info listing must agree with hasAccess() above.
assertTrue(hasQueueACL(a.getQueueUserAclInfo(user),QueueACL.SUBMIT_APPLICATIONS));
assertTrue(hasQueueACL(b.getQueueUserAclInfo(user),QueueACL.SUBMIT_APPLICATIONS));
assertFalse(hasQueueACL(c.getQueueUserAclInfo(user),QueueACL.SUBMIT_APPLICATIONS));
assertFalse(hasQueueACL(c1.getQueueUserAclInfo(user),QueueACL.SUBMIT_APPLICATIONS));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies ACL inheritance down a multi-level queue tree: root denies both
// ACLs, ADMINISTER_QUEUE granted at c propagates to c1/c11/c111, and
// SUBMIT_APPLICATIONS granted at c11 propagates to c111 but not upward.
@Test public void testQueueAcl() throws Exception {
setupMultiLevelQueues(csConf);
// " " = nobody; root denies everything so inheritance is purely from c/c11.
csConf.setAcl(CapacitySchedulerConfiguration.ROOT,QueueACL.SUBMIT_APPLICATIONS," ");
csConf.setAcl(CapacitySchedulerConfiguration.ROOT,QueueACL.ADMINISTER_QUEUE," ");
final String Q_C=CapacitySchedulerConfiguration.ROOT + "." + C;
csConf.setAcl(Q_C,QueueACL.ADMINISTER_QUEUE,"*");
final String Q_C11=Q_C + "." + C1+ "."+ C11;
csConf.setAcl(Q_C11,QueueACL.SUBMIT_APPLICATIONS,"*");
Map queues=new HashMap();
CSQueue root=CapacityScheduler.parseQueue(csContext,csConf,null,CapacitySchedulerConfiguration.ROOT,queues,queues,TestUtils.spyHook);
UserGroupInformation user=UserGroupInformation.getCurrentUser();
ParentQueue c=(ParentQueue)queues.get(C);
ParentQueue c1=(ParentQueue)queues.get(C1);
ParentQueue c11=(ParentQueue)queues.get(C11);
ParentQueue c111=(ParentQueue)queues.get(C111);
// root: both ACLs denied.
assertFalse(root.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
// aclInfos is collected once at root and queried per-queue below.
List aclInfos=root.getQueueUserAclInfo(user);
assertFalse(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"root"));
assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"root"));
// c and c1: ADMINISTER_QUEUE inherited from c; SUBMIT still denied.
assertTrue(c.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c"));
assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c"));
assertTrue(c1.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c1"));
assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c1"));
// c11 and c111: both ACLs granted (SUBMIT set at c11, inherited by c111).
assertTrue(c11.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c11"));
assertTrue(c11.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c11"));
assertTrue(c111.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c111"));
assertTrue(c111.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c111"));
// NOTE(review): reset(c) clears the Mockito spy after all assertions —
// presumably cleanup against cross-test interaction; confirm it is needed.
reset(c);
}
UtilityVerifier BooleanVerifier HybridVerifier
// Verifies that a queue hierarchy where the B branch has 0 capacity
// everywhere can still be parsed: parseQueue() must not throw
// IllegalArgumentException for zero-capacity queues.
@Test public void testQueueCapacityZero() throws Exception {
setupMultiLevelQueues(csConf);
// Give the whole B branch zero capacity...
final String Q_B=CapacitySchedulerConfiguration.ROOT + "." + B;
csConf.setCapacity(Q_B,0);
csConf.setCapacity(Q_B + "." + B1,0);
csConf.setCapacity(Q_B + "." + B2,0);
csConf.setCapacity(Q_B + "." + B3,0);
// ...and a non-zero capacity to A so the config remains plausible.
final String Q_A=CapacitySchedulerConfiguration.ROOT + "." + A;
csConf.setCapacity(Q_A,60);
Map queues=new HashMap();
try {
CapacityScheduler.parseQueue(csContext,csConf,null,CapacitySchedulerConfiguration.ROOT,queues,queues,TestUtils.spyHook);
}
catch ( IllegalArgumentException e) {
fail("Failed to create queues with 0 capacity: " + e);
}
// Removed the tautological assertTrue(msg, true) that followed the
// try/catch — reaching this point without fail() already proves success.
}
BooleanVerifier
// Verifies user/group-to-queue mapping in the CapacityScheduler:
// - malformed mapping strings are rejected on reinitialize,
// - u:/g: mappings and the %user/%primary_group placeholders resolve
//   to the expected queue,
// - the override flag controls whether the mapping beats the queue the
//   app was submitted to,
// - mapping to a non-existent queue fails initialization.
@Test(timeout=60000) public void testQueueMapping() throws Exception {
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(csConf);
YarnConfiguration conf=new YarnConfiguration(csConf);
CapacityScheduler cs=new CapacityScheduler();
// Minimal RMContext: only the secret managers the scheduler touches.
RMContextImpl rmContext=new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null);
cs.setConf(conf);
cs.setRMContext(rmContext);
cs.init(conf);
cs.start();
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class);
conf.set(CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,"true");
// Each malformed mapping string must be rejected (helper asserts failure).
checkInvalidQMapping(conf,cs,"x:a:b","invalid specifier");
checkInvalidQMapping(conf,cs,"u:a","no queue specified");
checkInvalidQMapping(conf,cs,"g:a","no queue specified");
checkInvalidQMapping(conf,cs,"u:a:b,g:a","multiple mappings with invalid mapping");
checkInvalidQMapping(conf,cs,"u:a:b,g:a:d:e","too many path segments");
checkInvalidQMapping(conf,cs,"u::","empty source and queue");
checkInvalidQMapping(conf,cs,"u:","missing source missing queue");
checkInvalidQMapping(conf,cs,"u:a:","empty source missing q");
// Valid mappings: user, group, %user placeholder (both sides), and
// %primary_group; each reinitialize applies the new mapping.
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:a:" + Q1);
cs.reinitialize(conf,null);
checkQMapping("a",Q1,cs);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"g:agroup:" + Q1);
cs.reinitialize(conf,null);
checkQMapping("a",Q1,cs);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:%user:" + Q2);
cs.reinitialize(conf,null);
checkQMapping("a",Q2,cs);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:%user:%user");
cs.reinitialize(conf,null);
checkQMapping("a","a",cs);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:%user:%primary_group");
cs.reinitialize(conf,null);
checkQMapping("a","agroup",cs);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"g:asubgroup1:" + Q1);
cs.reinitialize(conf,null);
checkQMapping("a",Q1,cs);
// Whitespace around the separators must be tolerated.
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING," u : a : " + Q1);
cs.reinitialize(conf,null);
checkQMapping("a",Q1,cs);
// Second phase: full MockRM to test end-to-end app placement.
csConf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(csConf);
conf=new YarnConfiguration(csConf);
resourceManager=new MockRM(csConf);
resourceManager.start();
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class);
conf.set(CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,"true");
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:user:" + Q1);
resourceManager.getResourceScheduler().reinitialize(conf,null);
// Override enabled: mapping (Q1) wins over the submitted queue (Q2).
checkAppQueue(resourceManager,"user",Q2,Q1);
// Override disabled: the submitted queue wins; mapping only fills in
// when no queue was specified.
conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,false);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:user:" + Q1);
setupQueueConfiguration(csConf);
resourceManager.getResourceScheduler().reinitialize(conf,null);
checkAppQueue(resourceManager,"user",Q2,Q2);
checkAppQueue(resourceManager,"user",null,Q1);
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"g:usergroup:" + Q2);
setupQueueConfiguration(csConf);
resourceManager.getResourceScheduler().reinitialize(conf,null);
checkAppQueue(resourceManager,"user",null,Q2);
// Mapping to a queue that does not exist must fail reinitialize.
conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,"u:user:non_existent_queue");
setupQueueConfiguration(csConf);
boolean fail=false;
try {
resourceManager.getResourceScheduler().reinitialize(conf,null);
}
catch ( IOException ioex) {
fail=true;
}
Assert.assertTrue("queue initialization failed for non-existent q",fail);
resourceManager.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Verifies FairScheduler allocation-file parsing: writes an allocation file,
// loads it through AllocationFileLoaderService, and checks per-queue min
// resources, max apps, maxAMShare, ACLs, preemption timeouts, parent/leaf
// classification and scheduling policies (including the drf default).
// NOTE(review): the out.println(...) literals below look garbled — the XML
// element tags appear to have been stripped (e.g. "1024mb,0vcores " was
// presumably "<minResources>1024mb,0vcores</minResources>"). Confirm against
// the original source before relying on the written file's contents.
@Test public void testAllocationFileParsing() throws Exception {
Configuration conf=new Configuration();
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
AllocationFileLoaderService allocLoader=new AllocationFileLoaderService();
// Write the allocation file the loader will read.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println("2048mb,0vcores ");
out.println("alice,bob admins ");
out.println("fair ");
out.println(" ");
out.println("");
out.println("alice,bob admins ");
out.println(" ");
out.println("");
out.println("3 ");
out.println("0.4 ");
out.println(" ");
out.println("");
out.println("60 ");
out.println(" ");
out.println("");
out.println(" ");
out.println("");
out.println(" ");
out.println(" ");
out.println(" ");
out.println("15 ");
out.println("5 ");
out.println("0.5f ");
out.println("");
out.println("10 ");
out.println(" ");
out.println("120" + " ");
out.println("300 ");
out.println("drf ");
out.println(" ");
out.close();
allocLoader.init(conf);
ReloadListener confHolder=new ReloadListener();
allocLoader.setReloadListener(confHolder);
allocLoader.reloadAllocations();
AllocationConfiguration queueConf=confHolder.allocConf;
// Six leaf queues parsed in total.
assertEquals(6,queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
// Min resources: explicit for queueA/queueB, zero elsewhere.
// NOTE(review): the next line duplicates this assertion verbatim.
assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(Resources.createResource(1024,0),queueConf.getMinResources("root.queueA"));
assertEquals(Resources.createResource(2048,0),queueConf.getMinResources("root.queueB"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueC"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueD"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueE"));
// Max apps: queue-default 15 applies everywhere except queueD's explicit 3.
assertEquals(15,queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(15,queueConf.getQueueMaxApps("root.queueA"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueB"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueC"));
assertEquals(3,queueConf.getQueueMaxApps("root.queueD"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueE"));
// Per-user max apps: explicit 10 for user1, user-default 5 otherwise.
assertEquals(10,queueConf.getUserMaxApps("user1"));
assertEquals(5,queueConf.getUserMaxApps("user2"));
// maxAMShare: default 0.5 everywhere except queueD's explicit 0.4.
assertEquals(.5f,queueConf.getQueueMaxAMShare("root." + YarnConfiguration.DEFAULT_QUEUE_NAME),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueA"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueB"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueC"),0.01);
assertEquals(.4f,queueConf.getQueueMaxAMShare("root.queueD"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueE"),0.01);
// ACLs: root defaults to "*"; queueA has none (" "); queueB/queueC explicit.
assertEquals("*",queueConf.getQueueAcl("root",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals("*",queueConf.getQueueAcl("root",QueueACL.SUBMIT_APPLICATIONS).getAclString());
assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.SUBMIT_APPLICATIONS).getAclString());
assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueB",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueC",QueueACL.SUBMIT_APPLICATIONS).getAclString());
// Min-share preemption timeout: default 120s except queueE's explicit 60s.
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueB"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueC"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueD"));
// NOTE(review): duplicate of the root.queueA assertion above — possibly
// intended for a different queue; verify against upstream.
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
assertEquals(60000,queueConf.getMinSharePreemptionTimeout("root.queueE"));
assertEquals(300000,queueConf.getFairSharePreemptionTimeout());
// Parent/leaf classification of the F/G/H subtree.
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueF"));
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueG"));
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueG.queueH"));
// Scheduling policies: drf default propagates, queueB explicitly fair,
// and queues not in the file (root.newqueue) inherit the default.
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root").getName());
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.queueA").getName());
assertEquals(FairSharePolicy.NAME,queueConf.getSchedulingPolicy("root.queueB").getName());
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.newqueue").getName());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Verifies that AllocationFileLoaderService detects a changed allocation
// file and reloads it: the placement rules and queue set from the first file
// are replaced by those of the rewritten file once the loader's background
// thread fires the reload listener.
// NOTE(review): the out.println(...) literals appear to have had their XML
// tags stripped by extraction; confirm the file contents against upstream.
@Test(timeout=10000) public void testReload() throws Exception {
// First version of the allocation file.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println(" ");
out.println(" 1 ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
// MockClock lets the test fast-forward past the reload wait window.
MockClock clock=new MockClock();
Configuration conf=new Configuration();
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(clock);
// Poll aggressively so the test completes quickly.
allocLoader.reloadIntervalMs=5;
allocLoader.init(conf);
ReloadListener confHolder=new ReloadListener();
allocLoader.setReloadListener(confHolder);
allocLoader.reloadAllocations();
AllocationConfiguration allocConf=confHolder.allocConf;
// Initial load: single Default placement rule, two leaf queues, and
// queueA's maxRunningApps of 1.
QueuePlacementPolicy policy=allocConf.getPlacementPolicy();
List rules=policy.getRules();
assertEquals(1,rules.size());
assertEquals(QueuePlacementRule.Default.class,rules.get(0).getClass());
assertEquals(1,allocConf.getQueueMaxApps("root.queueA"));
assertEquals(2,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueA"));
assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB"));
// Clear the holder so we can detect when the background reload completes.
confHolder.allocConf=null;
// Rewrite the allocation file with different rules and queues.
out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println(" ");
out.println(" 3 ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
// Jump the clock far enough that the loader treats the file as stale.
clock.tick(System.currentTimeMillis() + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000);
allocLoader.start();
// Busy-wait (bounded by the test timeout) for the reload callback.
while (confHolder.allocConf == null) {
Thread.sleep(20);
}
// Reloaded config: three placement rules (Specified, NestedUserQueue
// wrapping PrimaryGroup, Default) and only queueB remaining.
allocConf=confHolder.allocConf;
policy=allocConf.getPlacementPolicy();
rules=policy.getRules();
assertEquals(3,rules.size());
assertEquals(QueuePlacementRule.Specified.class,rules.get(0).getClass());
assertEquals(QueuePlacementRule.NestedUserQueue.class,rules.get(1).getClass());
assertEquals(QueuePlacementRule.PrimaryGroup.class,((NestedUserQueue)(rules.get(1))).nestedRule.getClass());
assertEquals(QueuePlacementRule.Default.class,rules.get(2).getClass());
assertEquals(3,allocConf.getQueueMaxApps("root.queueB"));
assertEquals(1,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that when the configured allocation file is a bare resource name,
// the loader resolves it from the test classpath to an existing file.
@Test public void testGetAllocationFileFromClasspath(){
Configuration schedulerConf=new Configuration();
schedulerConf.set(FairSchedulerConfiguration.ALLOCATION_FILE,"test-fair-scheduler.xml");
AllocationFileLoaderService loader=new AllocationFileLoaderService();
// The loader should locate the resource and hand back a real file.
File resolved=loader.getAllocationFile(schedulerConf);
assertEquals("test-fair-scheduler.xml",resolved.getName());
assertTrue(resolved.exists());
}
InternalCallVerifier BooleanVerifier
// Verifies that updateDemand() caps the queue's aggregated demand at the
// configured maximum even when its apps together ask for more.
@Test public void testUpdateDemand(){
FSAppAttempt mockApp=mock(FSAppAttempt.class);
// Each registration of the app contributes maxResource worth of demand,
// so the raw sum (2x) exceeds the cap.
Mockito.when(mockApp.getDemand()).thenReturn(maxResource);
schedulable.addAppSchedulable(mockApp);
schedulable.addAppSchedulable(mockApp);
schedulable.updateDemand();
assertTrue("Demand is greater than max allowed ",Resources.equals(schedulable.getDemand(),maxResource));
}
BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that every FairScheduler tunable set in the Configuration is
// picked up on init()/start()/reinitialize(): assignment mode, locality
// thresholds and delays, continuous scheduling, and min/max/increment
// allocation sizes.
@Test(timeout=2000) public void testLoadConfigurationOnInitialize() throws IOException {
conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true);
conf.setInt(FairSchedulerConfiguration.MAX_ASSIGN,3);
conf.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,true);
conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,.5);
conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,.7);
conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,true);
conf.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,10);
conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_RACK_MS,5000);
conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_NODE_MS,5000);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,1024);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,512);
conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,128);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Boolean flags: use assertTrue rather than assertEquals(true, ...).
Assert.assertTrue(scheduler.assignMultiple);
Assert.assertEquals(3,scheduler.maxAssign);
Assert.assertTrue(scheduler.sizeBasedWeight);
Assert.assertEquals(.5,scheduler.nodeLocalityThreshold,.01);
Assert.assertEquals(.7,scheduler.rackLocalityThreshold,.01);
Assert.assertTrue("The continuous scheduling should be enabled",scheduler.continuousSchedulingEnabled);
Assert.assertEquals(10,scheduler.continuousSchedulingSleepMs);
Assert.assertEquals(5000,scheduler.nodeLocalityDelayMs);
Assert.assertEquals(5000,scheduler.rackLocalityDelayMs);
// Allocation sizing limits flow through to the scheduler's capabilities.
Assert.assertEquals(1024,scheduler.getMaximumResourceCapability().getMemory());
Assert.assertEquals(512,scheduler.getMinimumResourceCapability().getMemory());
Assert.assertEquals(128,scheduler.getIncrementResourceCapability().getMemory());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that when a container is killed by preemption, its original
// ResourceRequests (node-local, rack-local, off-rack) are restored to the
// app so the container can be re-allocated on a later heartbeat.
@Test(timeout=5000) public void testRecoverRequestAfterPreemption() throws Exception {
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10);
MockClock clock=new MockClock();
scheduler.setClock(clock);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
Priority priority=Priority.newInstance(20);
String host="127.0.0.1";
int GB=1024;
// One 16GB/4-core node to host the container.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * 1024,4),0,host);
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Ask at all three locality levels for a single 1GB container.
List ask=new ArrayList();
ResourceRequest nodeLocalRequest=createResourceRequest(GB,1,host,priority.getPriority(),1,true);
ResourceRequest rackLocalRequest=createResourceRequest(GB,1,node.getRackName(),priority.getPriority(),1,true);
ResourceRequest offRackRequest=createResourceRequest(GB,1,ResourceRequest.ANY,priority.getPriority(),1,true);
ask.add(nodeLocalRequest);
ask.add(rackLocalRequest);
ask.add(offRackRequest);
ApplicationAttemptId appAttemptId=createSchedulingRequest("queueA","user1",ask);
scheduler.update();
NodeUpdateSchedulerEvent nodeUpdate=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeUpdate);
// The container was allocated, which consumes the node-local request.
assertEquals(1,scheduler.getSchedulerApp(appAttemptId).getLiveContainers().size());
FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId);
Assert.assertNull(app.getResourceRequest(priority,host));
ContainerId containerId1=ContainerId.newInstance(appAttemptId,1);
RMContainer rmContainer=app.getRMContainer(containerId1);
// First call warns; after WAIT_TIME_BEFORE_KILL (10ms < tick of 5s) the
// second call kills the container.
scheduler.warnOrKillContainer(rmContainer);
clock.tick(5);
scheduler.warnOrKillContainer(rmContainer);
// The killed container's three original requests must be recoverable...
List requests=rmContainer.getResourceRequests();
Assert.assertEquals(3,requests.size());
// ...and each must be back in the app's pending ask with count 1.
for ( ResourceRequest request : requests) {
Assert.assertEquals(1,app.getResourceRequest(priority,request.getResourceName()).getNumContainers());
}
// A subsequent heartbeat re-allocates the container from the restored ask.
scheduler.update();
scheduler.handle(nodeUpdate);
List containers=scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,null).getContainers();
Assert.assertTrue(containers.size() == 1);
}
BooleanVerifier EqualityVerifier HybridVerifier
// Verifies which containers the FairScheduler chooses to preempt: within a
// queue the lowest-priority (highest priority number) containers of the
// over-share apps go first, and apps at or below fair share (app1, app3)
// are never preempted.
// NOTE(review): the out.println(...) literals appear to have lost their XML
// tags during extraction; the written file contents should be confirmed
// against upstream before further edits.
@Test(timeout=5000) public void testChoiceOfPreemptedContainers() throws Exception {
conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL,5000);
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10000);
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file",ALLOC_FILE);
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"false");
MockClock clock=new MockClock();
scheduler.setClock(clock);
// Four queues, each entitled to a 0.25 share of the cluster.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println(".25 ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Two 4GB/4-core nodes = 8GB cluster.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
// queueA: app1 (priorities 1,2) and app2 (priorities 3,4);
// queueB: app3 (priorities 1,2) and app4 (priorities 3,4).
ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,1,"queueA","user1",1,1);
createSchedulingRequestExistingApplication(1 * 1024,1,2,app1);
ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,1,"queueA","user1",1,3);
createSchedulingRequestExistingApplication(1 * 1024,1,4,app2);
ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,1,"queueB","user1",1,1);
createSchedulingRequestExistingApplication(1 * 1024,1,2,app3);
ApplicationAttemptId app4=createSchedulingRequest(1 * 1024,1,"queueB","user1",1,3);
createSchedulingRequestExistingApplication(1 * 1024,1,4,app4);
scheduler.update();
scheduler.getQueueManager().getLeafQueue("queueA",true).setPolicy(SchedulingPolicy.parse("fifo"));
scheduler.getQueueManager().getLeafQueue("queueB",true).setPolicy(SchedulingPolicy.parse("fair"));
// Heartbeat both nodes enough times to place all 8 containers.
NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent nodeUpdate2=new NodeUpdateSchedulerEvent(node2);
for (int i=0; i < 4; i++) {
scheduler.handle(nodeUpdate1);
scheduler.handle(nodeUpdate2);
}
assertEquals(2,scheduler.getSchedulerApp(app1).getLiveContainers().size());
assertEquals(2,scheduler.getSchedulerApp(app2).getLiveContainers().size());
assertEquals(2,scheduler.getSchedulerApp(app3).getLiveContainers().size());
assertEquals(2,scheduler.getSchedulerApp(app4).getLiveContainers().size());
// New demand in queueC and default forces preemption from queueA/queueB.
createSchedulingRequest(1 * 1024,1,"queueC","user1",1,1);
createSchedulingRequest(1 * 1024,1,"queueC","user1",1,1);
createSchedulingRequest(1 * 1024,1,"default","user1",1,1);
createSchedulingRequest(1 * 1024,1,"default","user1",1,1);
scheduler.update();
// First pass only marks containers for preemption (kill wait not elapsed).
scheduler.preemptResources(Resources.createResource(2 * 1024));
assertEquals(2,scheduler.getSchedulerApp(app1).getLiveContainers().size());
assertEquals(2,scheduler.getSchedulerApp(app3).getLiveContainers().size());
assertTrue("App2 should have container to be preempted",!Collections.disjoint(scheduler.getSchedulerApp(app2).getLiveContainers(),scheduler.getSchedulerApp(app2).getPreemptionContainers()));
// FIX: this assertion previously checked app2's containers again
// (copy-paste bug) — it must inspect app4 to match its message.
assertTrue("App4 should have container to be preempted",!Collections.disjoint(scheduler.getSchedulerApp(app4).getLiveContainers(),scheduler.getSchedulerApp(app4).getPreemptionContainers()));
// After the kill wait elapses, one container each is taken from app2/app4.
clock.tick(15);
scheduler.preemptResources(Resources.createResource(2 * 1024));
assertEquals(1,scheduler.getSchedulerApp(app2).getLiveContainers().size());
assertEquals(1,scheduler.getSchedulerApp(app4).getLiveContainers().size());
// The preempted containers must have been the priority-4 (lowest) ones.
Set set=new HashSet();
for ( RMContainer container : scheduler.getSchedulerApp(app2).getLiveContainers()) {
if (container.getAllocatedPriority().getPriority() == 4) {
set.add(container);
}
}
for ( RMContainer container : scheduler.getSchedulerApp(app4).getLiveContainers()) {
if (container.getAllocatedPriority().getPriority() == 4) {
set.add(container);
}
}
assertTrue("Containers with priority=4 in app2 and app4 should be " + "preempted.",set.isEmpty());
// A second round preempts one more container from each over-share app.
scheduler.preemptResources(Resources.createResource(2 * 1024));
clock.tick(15);
scheduler.preemptResources(Resources.createResource(2 * 1024));
assertEquals(2,scheduler.getSchedulerApp(app1).getLiveContainers().size());
assertEquals(0,scheduler.getSchedulerApp(app2).getLiveContainers().size());
assertEquals(1,scheduler.getSchedulerApp(app3).getLiveContainers().size());
assertEquals(1,scheduler.getSchedulerApp(app4).getLiveContainers().size());
// Everyone is now at/below fair share: no further preemption candidates.
scheduler.preemptResources(Resources.createResource(2 * 1024));
assertTrue("App1 should have no container to be preempted",scheduler.getSchedulerApp(app1).getPreemptionContainers().isEmpty());
assertTrue("App2 should have no container to be preempted",scheduler.getSchedulerApp(app2).getPreemptionContainers().isEmpty());
assertTrue("App3 should have no container to be preempted",scheduler.getSchedulerApp(app3).getPreemptionContainers().isEmpty());
assertTrue("App4 should have no container to be preempted",scheduler.getSchedulerApp(app4).getPreemptionContainers().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that moving an app out of a queue whose maxRunningApps limit made
// it non-runnable turns it runnable in the destination queue, and that the
// runnable-app counts update at both the leaf and root levels.
@Test public void testMoveMakesAppRunnable() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueMgr=scheduler.getQueueManager();
FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true);
FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true);
// Cap queue1 at 0 running apps so any submission there is non-runnable.
scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1",0);
ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3);
FSAppAttempt app=scheduler.getSchedulerApp(appAttId);
assertTrue(oldQueue.getNonRunnableAppSchedulables().contains(app));
// Moving to queue2 (no cap) must promote the app to runnable there.
scheduler.moveApplication(appAttId.getApplicationId(),"queue2");
assertFalse(oldQueue.getNonRunnableAppSchedulables().contains(app));
assertFalse(targetQueue.getNonRunnableAppSchedulables().contains(app));
assertTrue(targetQueue.getRunnableAppSchedulables().contains(app));
assertEquals(1,targetQueue.getNumRunnableApps());
assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps());
}
UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that continuousSchedulingAttempt() tolerates a node being removed
// between scheduling passes: after removing one of two nodes, a manual
// scheduling attempt must not throw.
@Test public void testContinuousSchedulingWithNodeRemoved() throws Exception {
scheduler.init(conf);
scheduler.start();
// Use assertFalse instead of assertTrue on a negated condition.
Assert.assertFalse("Continuous scheduling should be disabled.",scheduler.isContinuousSchedulingEnabled());
// Register two 8GB/8-core nodes.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
Assert.assertEquals("We should have two alive nodes.",2,scheduler.getNumClusterNodes());
// Remove node1 and confirm the cluster count drops.
NodeRemovedSchedulerEvent removeNode1=new NodeRemovedSchedulerEvent(node1);
scheduler.handle(removeNode1);
Assert.assertEquals("We should only have one alive node.",1,scheduler.getNumClusterNodes());
// A scheduling attempt over the (now smaller) node set must not throw.
try {
scheduler.continuousSchedulingAttempt();
}
catch ( Exception e) {
fail("Exception happened when doing continuous scheduling. " + e.toString());
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Enables continuous scheduling and verifies that resource requests are
 * satisfied asynchronously (no node-update heartbeats are sent), and that
 * the two allocated containers end up on two distinct nodes.
 */
@Test(timeout=10000) public void testContinuousScheduling() throws Exception {
  // Use a dedicated scheduler instance with continuous scheduling enabled.
  FairScheduler fs = new FairScheduler();
  Configuration conf = createConfiguration();
  conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
  fs.setRMContext(resourceManager.getRMContext());
  fs.init(conf);
  fs.start();
  fs.reinitialize(conf, resourceManager.getRMContext());
  Assert.assertTrue("Continuous scheduling should be enabled.", fs.isContinuousSchedulingEnabled());
  // Register two 8GB/8-vcore nodes (16GB/16 vcores total).
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  fs.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  fs.handle(nodeEvent2);
  // BUGFIX: expected/actual arguments were swapped, which produces a
  // misleading "expected X but was Y" message on failure.
  Assert.assertEquals(16 * 1024, fs.getClusterResource().getMemory());
  Assert.assertEquals(16, fs.getClusterResource().getVirtualCores());
  // Submit an app with a single 1GB/1-vcore request.
  ApplicationAttemptId appAttemptId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  fs.addApplication(appAttemptId.getApplicationId(), "queue11", "user11", false);
  fs.addApplicationAttempt(appAttemptId, false, false);
  List ask = new ArrayList();
  ResourceRequest request = createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
  ask.add(request);
  fs.allocate(appAttemptId, ask, new ArrayList(), null, null);
  // Give the continuous-scheduling thread at least one full cycle.
  Thread.sleep(fs.getConf().getContinuousSchedulingSleepMs() + 500);
  FSAppAttempt app = fs.getSchedulerApp(appAttemptId);
  // BUGFIX: the original polled with an empty loop body, pegging a CPU core
  // until the container appeared (or the 10s timeout fired). Sleep briefly
  // between polls instead; the @Test timeout still bounds the wait.
  while (app.getCurrentConsumption().equals(Resources.none())) {
    Thread.sleep(10);
  }
  Assert.assertEquals(1024, app.getCurrentConsumption().getMemory());
  Assert.assertEquals(1, app.getCurrentConsumption().getVirtualCores());
  // Ask for a second container and wait for it to be placed asynchronously.
  request = createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
  ask.clear();
  ask.add(request);
  fs.allocate(appAttemptId, ask, new ArrayList(), null, null);
  while (app.getCurrentConsumption().equals(Resources.createResource(1024, 1))) {
    Thread.sleep(10);
  }
  Assert.assertEquals(2048, app.getCurrentConsumption().getMemory());
  Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores());
  // The two live containers must be spread across both registered nodes.
  Set nodes = new HashSet();
  Iterator it = app.getLiveContainers().iterator();
  while (it.hasNext()) {
    nodes.add(it.next().getContainer().getNodeId());
  }
  Assert.assertEquals(2, nodes.size());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks that getAppsInQueue resolves both short and root-qualified queue
 * names, including per-user default queues, and that a parent queue returns
 * the union of its children's apps.
 */
@Test public void testGetAppsInQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  ApplicationAttemptId attempt1 = createSchedulingRequest(1024, 1, "queue1.subqueue1", "user1");
  ApplicationAttemptId attempt2 = createSchedulingRequest(1024, 1, "queue1.subqueue2", "user1");
  ApplicationAttemptId attempt3 = createSchedulingRequest(1024, 1, "default", "user1");
  // A leaf queue can be looked up by its short name...
  List queueApps = scheduler.getAppsInQueue("queue1.subqueue1");
  assertEquals(1, queueApps.size());
  assertEquals(attempt1, queueApps.get(0));
  // ...or by its fully-qualified (root-prefixed) name.
  queueApps = scheduler.getAppsInQueue("root.queue1.subqueue1");
  assertEquals(1, queueApps.size());
  assertEquals(attempt1, queueApps.get(0));
  // Apps submitted to "default" land in the per-user queue.
  queueApps = scheduler.getAppsInQueue("user1");
  assertEquals(1, queueApps.size());
  assertEquals(attempt3, queueApps.get(0));
  queueApps = scheduler.getAppsInQueue("root.user1");
  assertEquals(1, queueApps.size());
  assertEquals(attempt3, queueApps.get(0));
  // A parent queue aggregates the apps of all of its children.
  queueApps = scheduler.getAppsInQueue("queue1");
  assertEquals(2, queueApps.size());
  Set bothAttempts = Sets.newHashSet(queueApps.get(0), queueApps.get(1));
  assertTrue(bothAttempts.contains(attempt1));
  assertTrue(bothAttempts.contains(attempt2));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies FairScheduler's preemption accounting: once queueC/queueD have
// been starved past the configured minShare and fairShare preemption
// timeouts, resToPreempt() should report a growing amount to reclaim.
@Test(timeout=5000) public void testPreemptionDecision() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
// Mock clock lets the test advance the preemption timeouts deterministically.
MockClock clock=new MockClock();
scheduler.setClock(clock);
// NOTE(review): the XML markup appears to have been stripped from these
// println() calls (only trailing fragments like "1024mb,0vcores " remain).
// Presumably the original wrote an allocation file declaring queueA-queueD
// with 1024mb minResources, weight .25, and min/fair-share preemption
// timeouts of 5s/10s -- TODO confirm against the upstream test source.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("0mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.print("5 ");
out.print("10 ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Three 2GB/2-vcore nodes: 6GB total cluster capacity.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
RMNode node3=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),3,"127.0.0.3");
NodeAddedSchedulerEvent nodeEvent3=new NodeAddedSchedulerEvent(node3);
scheduler.handle(nodeEvent3);
// Fill the cluster with six 1GB apps split between queueA and queueB.
ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,"queueA","user1",1,1);
ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,"queueA","user1",1,2);
ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,"queueA","user1",1,3);
ApplicationAttemptId app4=createSchedulingRequest(1 * 1024,"queueB","user1",1,1);
ApplicationAttemptId app5=createSchedulingRequest(1 * 1024,"queueB","user1",1,2);
ApplicationAttemptId app6=createSchedulingRequest(1 * 1024,"queueB","user1",1,3);
scheduler.update();
// Two heartbeat rounds per node so all six containers get placed.
for (int i=0; i < 2; i++) {
NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(nodeUpdate1);
NodeUpdateSchedulerEvent nodeUpdate2=new NodeUpdateSchedulerEvent(node2);
scheduler.handle(nodeUpdate2);
NodeUpdateSchedulerEvent nodeUpdate3=new NodeUpdateSchedulerEvent(node3);
scheduler.handle(nodeUpdate3);
}
// The cluster is now full, so apps in queueC/queueD can only be served by
// preempting resources from queueA/queueB.
ApplicationAttemptId app7=createSchedulingRequest(1 * 1024,"queueC","user1",1,1);
ApplicationAttemptId app8=createSchedulingRequest(1 * 1024,"queueC","user1",1,2);
ApplicationAttemptId app9=createSchedulingRequest(1 * 1024,"queueC","user1",1,3);
ApplicationAttemptId app10=createSchedulingRequest(1 * 1024,"queueD","user1",1,1);
ApplicationAttemptId app11=createSchedulingRequest(1 * 1024,"queueD","user1",1,2);
ApplicationAttemptId app12=createSchedulingRequest(1 * 1024,"queueD","user1",1,3);
scheduler.update();
FSLeafQueue schedC=scheduler.getQueueManager().getLeafQueue("queueC",true);
FSLeafQueue schedD=scheduler.getQueueManager().getLeafQueue("queueD",true);
// Before any timeout expires, nothing is preemptable.
assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedC,clock.getTime())));
assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedD,clock.getTime())));
// After 6s (past the 5s min-share timeout) each starved queue may reclaim
// up to its 1024mb min share.
clock.tick(6);
assertEquals(1024,scheduler.resToPreempt(schedC,clock.getTime()).getMemory());
assertEquals(1024,scheduler.resToPreempt(schedD,clock.getTime()).getMemory());
scheduler.update();
// After 12s total (past the 10s fair-share timeout) the claim grows to the
// fair-share deficit of 1536mb.
clock.tick(6);
assertEquals(1536,scheduler.resToPreempt(schedC,clock.getTime()).getMemory());
assertEquals(1536,scheduler.resToPreempt(schedD,clock.getTime()).getMemory());
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * serviceInit must reject configurations whose minimum allocation exceeds
 * the maximum, for both the memory and the vcores dimensions.
 */
@Test(timeout=30000) public void testConfValidation() throws Exception {
  FairScheduler fairScheduler = new FairScheduler();
  // Memory: min (2048) > max (1024) must be rejected.
  Configuration config = new YarnConfiguration();
  config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  config.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  try {
    fairScheduler.serviceInit(config);
    fail("Exception is expected because the min memory allocation is" + " larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.", e.getMessage().startsWith("Invalid resource scheduler memory"));
  }
  // Vcores: min (2) > max (1) must be rejected as well.
  config = new YarnConfiguration();
  config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 2);
  config.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 1);
  try {
    fairScheduler.serviceInit(config);
    fail("Exception is expected because the min vcores allocation is" + " larger than the max vcores allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.", e.getMessage().startsWith("Invalid resource scheduler vcores"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that FairScheduler honors per-app blacklists passed through
// allocate(): a blacklisted node gets no containers, and removing the entry
// makes allocation succeed again.
@SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final int GB=1024;
String host="127.0.0.1";
// A single large node so any successful allocation must land on it.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * GB,16),0,host);
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
ApplicationAttemptId appAttemptId=createSchedulingRequest(GB,"root.default","user",1);
FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId);
// Blacklist additions (4th arg) and removals (5th arg) toggle the flag.
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host));
List update=Arrays.asList(createResourceRequest(GB,node.getHostName(),1,0,true));
// With the node blacklisted, a heartbeat must allocate nothing.
scheduler.allocate(appAttemptId,update,Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",0,app.getLiveContainers().size());
// After removing the blacklist entry the same request is satisfied.
scheduler.allocate(appAttemptId,update,Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(app.isBlacklisted(host));
createSchedulingRequest(GB,"root.default","user",1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",1,app.getLiveContainers().size());
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the update and continuous-scheduling threads start with the
 * scheduler and terminate once the scheduler is stopped.
 */
@Test public void testThreadLifeCycle() throws InterruptedException {
  conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
  scheduler.init(conf);
  scheduler.start();
  Thread updateThread = scheduler.updateThread;
  Thread schedulingThread = scheduler.schedulingThread;
  assertTrue(updateThread.isAlive());
  assertTrue(schedulingThread.isAlive());
  scheduler.stop();
  // Poll for up to ~5s (100 x 50ms) for both threads to terminate.
  int numRetries = 100;
  while (numRetries-- > 0 && (updateThread.isAlive() || schedulingThread.isAlive())) {
    Thread.sleep(50);
  }
  // BUGFIX: the original asserted numRetries != 0, which passes on retry
  // exhaustion (post-decrement leaves numRetries at -1) and fails spuriously
  // when the threads happen to die on the final retry (leaving exactly 0).
  // Assert the actual condition -- both threads are dead -- instead.
  assertFalse("updateThread is still alive", updateThread.isAlive());
  assertFalse("schedulingThread is still alive", schedulingThread.isAlive());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies that moving a runnable app between queues transfers its queue
// membership, resource usage, demand, and runnable-app counts atomically.
@Test public void testMoveRunnableApp() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueMgr=scheduler.getQueueManager();
FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true);
FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true);
// App asks for 3 x 1GB containers in queue1.
ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3);
ApplicationId appId=appAttId.getApplicationId();
// A 1GB node: only one of the three requested containers can be placed.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(1024));
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.handle(updateEvent);
// One container allocated => 1024mb/1 vcore used in the source queue.
assertEquals(Resource.newInstance(1024,1),oldQueue.getResourceUsage());
scheduler.update();
assertEquals(Resource.newInstance(3072,3),oldQueue.getDemand());
scheduler.moveApplication(appId,"queue2");
FSAppAttempt app=scheduler.getSchedulerApp(appAttId);
// After the move: membership, usage and counters all belong to queue2.
assertSame(targetQueue,app.getQueue());
assertFalse(oldQueue.getRunnableAppSchedulables().contains(app));
assertTrue(targetQueue.getRunnableAppSchedulables().contains(app));
assertEquals(Resource.newInstance(0,0),oldQueue.getResourceUsage());
assertEquals(Resource.newInstance(1024,1),targetQueue.getResourceUsage());
assertEquals(0,oldQueue.getNumRunnableApps());
assertEquals(1,targetQueue.getNumRunnableApps());
assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps());
// Demand follows the app on the next update pass.
scheduler.update();
assertEquals(Resource.newInstance(0,0),oldQueue.getDemand());
assertEquals(Resource.newInstance(3072,3),targetQueue.getDemand());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies that the root queue's scheduling policy and child leaf queues
// are created from the allocation file.
@Test public void testConfigureRootQueue() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
// NOTE(review): the XML markup appears to have been stripped from these
// println() calls; judging by the assertions below, the original wrote an
// allocation file setting the root policy to drf and declaring child1 and
// child2 with minResources -- TODO confirm against the upstream test.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("fair ");
out.println("");
out.println(" drf ");
out.println(" ");
out.println(" 1024mb,1vcores ");
out.println(" ");
out.println(" ");
out.println(" 1024mb,4vcores ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
FSQueue root=queueManager.getRootQueue();
// The root queue's configured policy wins over the file-level default.
assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
// Both configured children exist without having to be created on demand.
assertNotNull(queueManager.getLeafQueue("child1",false));
assertNotNull(queueManager.getLeafQueue("child2",false));
}
InternalCallVerifier BooleanVerifier
/**
 * Make sure the scheduler creates the event log.
 */
@Test public void testCreateEventLog() throws IOException {
  // The scheduler should have created its event log on startup; the log
  // file it reports must exist on disk.
  FairSchedulerEventLog log = scheduler.getEventLog();
  logFile = new File(log.getLogFile());
  Assert.assertTrue(logFile.exists());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies that reloading the allocation configuration converts a leaf
// queue into a parent only once the leaf is empty: while apps remain the
// old leaf is kept, and after it drains the parent replaces it.
@Test public void testReloadTurnsLeafToParentWithNoLeaf(){
AllocationConfiguration allocConf=new AllocationConfiguration(conf);
allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.queue1");
queueManager.updateAllocationConfiguration(allocConf);
assertNotNull(queueManager.getLeafQueue("root.queue1",false));
// Mark the leaf as non-empty so the reload below cannot remove it.
notEmptyQueues.add(queueManager.getLeafQueue("root.queue1",false));
allocConf=new AllocationConfiguration(conf);
allocConf.configuredQueues.get(FSQueueType.PARENT).add("root.queue1");
queueManager.updateAllocationConfiguration(allocConf);
// While the leaf still has apps, it survives and no parent is created.
assertNotNull(queueManager.getLeafQueue("root.queue1",false));
assertNull(queueManager.getParentQueue("root.queue1",false));
// Once the leaf drains, a reload replaces it with an empty parent queue.
notEmptyQueues.clear();
queueManager.updateAllocationConfiguration(allocConf);
assertNull(queueManager.getLeafQueue("root.queue1",false));
assertNotNull(queueManager.getParentQueue("root.queue1",false));
assertTrue(queueManager.getParentQueue("root.queue1",false).getChildQueues().isEmpty());
}
BooleanVerifier
/**
 * Verifies that SchedulingPolicy.parse()/getInstance() resolve class names,
 * canonical names, and the short aliases ("drf", "fair", "fifo") to the
 * expected policy implementations.
 */
@Test(timeout=1000) public void testParseSchedulingPolicy() throws AllocationConfigurationException {
  // IMPROVEMENT: use assertEquals instead of assertTrue(a.equals(b)) so a
  // failure reports the actual policy name instead of just "false".
  SchedulingPolicy sm = SchedulingPolicy.parse(FairSharePolicy.class.getName());
  assertEquals("Invalid scheduler name", FairSharePolicy.NAME, sm.getName());
  sm = SchedulingPolicy.parse(FairSharePolicy.class.getCanonicalName());
  assertEquals("Invalid scheduler name", FairSharePolicy.NAME, sm.getName());
  sm = SchedulingPolicy.getInstance(FairSharePolicy.class);
  assertEquals("Invalid scheduler name", FairSharePolicy.NAME, sm.getName());
  // Short aliases map to their respective policies.
  sm = SchedulingPolicy.parse("drf");
  assertEquals("Invalid scheduler name", DominantResourceFairnessPolicy.NAME, sm.getName());
  sm = SchedulingPolicy.parse("fair");
  assertEquals("Invalid scheduler name", FairSharePolicy.NAME, sm.getName());
  sm = SchedulingPolicy.parse("fifo");
  assertEquals("Invalid scheduler name", FifoPolicy.NAME, sm.getName());
}
InternalCallVerifier BooleanVerifier
/**
 * Trivial tests that make sure {@link SchedulingPolicy#isApplicableTo(SchedulingPolicy,byte)}
 * works as expected for the possible values of depth.
 * @throws AllocationConfigurationException
 */
@Test(timeout=1000) public void testIsApplicableTo() throws AllocationConfigurationException {
  final String msg = "Broken SchedulingPolicy#isApplicableTo";
  // FIFO only makes sense at leaf depth.
  SchedulingPolicy fifo = SchedulingPolicy.parse("fifo");
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fifo, SchedulingPolicy.DEPTH_LEAF));
  assertFalse(msg, SchedulingPolicy.isApplicableTo(SchedulingPolicy.parse("fifo"), SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertFalse(msg, SchedulingPolicy.isApplicableTo(SchedulingPolicy.parse("fifo"), SchedulingPolicy.DEPTH_ROOT));
  // Fair share is applicable at every depth.
  SchedulingPolicy fair = SchedulingPolicy.parse("fair");
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fair, SchedulingPolicy.DEPTH_LEAF));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fair, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fair, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fair, SchedulingPolicy.DEPTH_PARENT));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(fair, SchedulingPolicy.DEPTH_ANY));
  // DRF is likewise applicable everywhere.
  SchedulingPolicy drf = SchedulingPolicy.parse("drf");
  assertTrue(msg, SchedulingPolicy.isApplicableTo(drf, SchedulingPolicy.DEPTH_LEAF));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(drf, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(drf, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(drf, SchedulingPolicy.DEPTH_PARENT));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(drf, SchedulingPolicy.DEPTH_ANY));
  // A policy declaring DEPTH_PARENT covers intermediate, root and parent
  // depths, but not DEPTH_ANY (which would also include leaves).
  SchedulingPolicy parentOnly = Mockito.mock(SchedulingPolicy.class);
  Mockito.when(parentOnly.getApplicableDepth()).thenReturn(SchedulingPolicy.DEPTH_PARENT);
  assertTrue(msg, SchedulingPolicy.isApplicableTo(parentOnly, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(parentOnly, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(msg, SchedulingPolicy.isApplicableTo(parentOnly, SchedulingPolicy.DEPTH_PARENT));
  assertFalse(msg, SchedulingPolicy.isApplicableTo(parentOnly, SchedulingPolicy.DEPTH_ANY));
}
BooleanVerifier
/** A schedulable below its min share sorts ahead of one with no unmet min share. */
@Test public void testOneIsNeedy() {
  int cmp = createComparator(8000, 8)
      .compare(createSchedulable(2000, 5, 0, 6), createSchedulable(4000, 3, 0, 0));
  assertTrue(cmp < 0);
}
BooleanVerifier
/** With memory dominant for both, the lighter memory user sorts first. */
@Test public void testSameDominantResource() {
  int cmp = createComparator(8000, 4)
      .compare(createSchedulable(1000, 1), createSchedulable(2000, 1));
  assertTrue(cmp < 0);
}
BooleanVerifier
/** When dominant resources differ, the smaller dominant share sorts first. */
@Test public void testDifferentDominantResource() {
  int cmp = createComparator(8000, 8)
      .compare(createSchedulable(4000, 3), createSchedulable(2000, 5));
  assertTrue(cmp < 0);
}
BooleanVerifier
/** Uniform weights scale the dominant share before comparison. */
@Test public void testEvenWeightsSameDominantResource() {
  int memDominant = createComparator(8000, 8)
      .compare(createSchedulable(3000, 1, new ResourceWeights(2.0f)), createSchedulable(2000, 1));
  assertTrue(memDominant < 0);
  int cpuDominant = createComparator(8000, 8)
      .compare(createSchedulable(1000, 3, new ResourceWeights(2.0f)), createSchedulable(1000, 2));
  assertTrue(cpuDominant < 0);
}
BooleanVerifier
/** Per-resource weights scale only the matching dominant resource. */
@Test public void testUnevenWeightsSameDominantResource() {
  int memDominant = createComparator(8000, 8)
      .compare(createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)), createSchedulable(2000, 1));
  assertTrue(memDominant < 0);
  int cpuDominant = createComparator(8000, 8)
      .compare(createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)), createSchedulable(1000, 2));
  assertTrue(cpuDominant < 0);
}
BooleanVerifier
/** Uneven weights applied when the two schedulables dominate different resources. */
@Test public void testUnevenWeightsDifferentDominantResource() {
  int first = createComparator(8000, 8)
      .compare(createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)), createSchedulable(2000, 1));
  assertTrue(first < 0);
  int second = createComparator(8000, 8)
      .compare(createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)), createSchedulable(1000, 2));
  assertTrue(second < 0);
}
BooleanVerifier
/** When both schedulables are under their min share, ratios to min share decide. */
@Test public void testBothAreNeedy() {
  int withoutMinShare = createComparator(8000, 100)
      .compare(createSchedulable(2000, 5), createSchedulable(4000, 3));
  assertTrue(withoutMinShare < 0);
  int withMinShare = createComparator(8000, 100)
      .compare(createSchedulable(2000, 5, 3000, 6), createSchedulable(4000, 3, 5000, 4));
  assertTrue(withMinShare < 0);
}
BooleanVerifier
/** Uniform weights applied when the two schedulables dominate different resources. */
@Test public void testEvenWeightsDifferentDominantResource() {
  int first = createComparator(8000, 8)
      .compare(createSchedulable(1000, 3, new ResourceWeights(2.0f)), createSchedulable(2000, 1));
  assertTrue(first < 0);
  int second = createComparator(8000, 8)
      .compare(createSchedulable(3000, 1, new ResourceWeights(2.0f)), createSchedulable(1000, 2));
  assertTrue(second < 0);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getAppsInQueue returns every attempt in "default" and null (not an empty
 * list) for a queue that does not exist.
 */
@Test public void testGetAppsInQueue() throws Exception {
  Application firstApp = new Application("user_0", resourceManager);
  firstApp.submit();
  Application secondApp = new Application("user_0", resourceManager);
  secondApp.submit();
  ResourceScheduler scheduler = resourceManager.getResourceScheduler();
  List defaultQueueApps = scheduler.getAppsInQueue("default");
  assertTrue(defaultQueueApps.contains(firstApp.getApplicationAttemptId()));
  assertTrue(defaultQueueApps.contains(secondApp.getApplicationAttemptId()));
  assertEquals(2, defaultQueueApps.size());
  // An unknown queue yields null rather than an empty list.
  Assert.assertNull(scheduler.getAppsInQueue("someotherqueue"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that FifoScheduler applies and removes per-app blacklist entries
 * passed through allocate()'s blacklistAdditions/blacklistRemovals lists.
 */
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  // IMPROVEMENT: stop the RM even if an assertion fails, so a failing run
  // does not leak the RM's threads and ports into subsequent tests.
  try {
    FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();
    String host = "127.0.0.1";
    RMNode node = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
    fs.handle(new NodeAddedSchedulerEvent(node));
    // Register an application and an attempt directly with the scheduler.
    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "default", "user");
    fs.handle(appEvent);
    SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    fs.handle(attemptEvent);
    // Adding the host blacklists it; removing the entry clears the flag.
    fs.allocate(appAttemptId, Collections.emptyList(), Collections.emptyList(), Collections.singletonList(host), null);
    Assert.assertTrue(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
    fs.allocate(appAttemptId, Collections.emptyList(), Collections.emptyList(), null, Collections.singletonList(host));
    Assert.assertFalse(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
  } finally {
    rm.stop();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Validate master-key-roll-over and that tokens are usable even after
 * master-key-roll-over: the AM keeps allocating across the roll, a token
 * minted from the new key works, and the old token is eventually rejected.
 * @throws Exception
 */
@Test public void testMasterKeyRollOver() throws Exception {
// Short rolling interval so the key rolls within the test's lifetime.
conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms);
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
Long startTime=System.currentTimeMillis();
// Local 'conf' shadows the field from here on: use the RM's live config.
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
ApplicationMasterProtocol rmClient=null;
AMRMTokenSecretManager appTokenSecretManager=rm.getRMContext().getAMRMTokenSecretManager();
// Remember the pre-roll master key for later comparison.
MasterKeyData oldKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(oldKey);
try {
MockNM nm1=rm.registerNode("localhost:1234",5120);
RMApp app=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
// Wait for the AM launch to deliver the container tokens.
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < maxWaitAttempts) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
rmClient.registerApplicationMaster(request);
// The token minted under the old key must work before the roll.
AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
// Keep heartbeating until the rolling interval has elapsed.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
rmClient.allocate(allocateRequest);
Thread.sleep(500);
}
// The master key must have rolled to a new value by now.
MasterKeyData newKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(newKey);
Assert.assertFalse("Master key should have changed!",oldKey.equals(newKey));
// Reconnect: the old token still works during the grace window.
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
// Heartbeat until the secret manager activates the new key (or an
// allocate starts failing, whichever happens first).
waitCount=0;
while (waitCount++ <= maxWaitAttempts) {
// NOTE(review): "getCurrnetMasterKeyData" is a typo in the production
// API name; it cannot be corrected from this test.
if (appTokenSecretManager.getCurrnetMasterKeyData() != oldKey) {
break;
}
try {
rmClient.allocate(allocateRequest);
}
catch ( Exception ex) {
break;
}
Thread.sleep(200);
}
// The new key is now current and no further key is queued.
Assert.assertTrue(appTokenSecretManager.getCurrnetMasterKeyData().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getMasterKey().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getNextMasterKeyData() == null);
// A token minted from the new key must be accepted.
Token newToken=appTokenSecretManager.createAndGetAMRMToken(applicationAttemptId);
SecurityUtil.setTokenService(newToken,rmBindAddress);
currentUser.addToken(newToken);
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
rpc.stopProxy(rmClient,conf);
// After activation, the token minted from the OLD key must be rejected.
try {
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
// Expected: authentication with the retired key fails.
}
}
finally {
rm.stop();
if (rmClient != null) {
rpc.stopProxy(rmClient,conf);
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Validate that application tokens are unusable after the
 * application-finishes: once the attempt reaches FINISHED, an allocate()
 * with the old AMRM token must be rejected by the secret manager.
 * @throws Exception
 */
@SuppressWarnings("unchecked") @Test public void testTokenExpiry() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
// Local 'conf' shadows the field from here on: use the RM's live config.
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
ApplicationMasterProtocol rmClient=null;
try {
MockNM nm1=rm.registerNode("localhost:1234",5120);
RMApp app=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
// Wait (up to 20s) for the AM launch to deliver the container tokens.
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
// Impersonate the AM: attach its AMRM token to a remote UGI.
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
rmClient.registerApplicationMaster(request);
// Finish the AM normally...
FinishApplicationMasterRequest finishAMRequest=Records.newRecord(FinishApplicationMasterRequest.class);
finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
finishAMRequest.setDiagnostics("diagnostics");
finishAMRequest.setTrackingUrl("url");
rmClient.finishApplicationMaster(finishAMRequest);
// ...and report its master container as complete so the attempt can
// transition to FINISHED.
ContainerStatus containerStatus=BuilderUtils.newContainerStatus(attempt.getMasterContainer().getId(),ContainerState.COMPLETE,"AM Container Finished",0);
rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppAttemptContainerFinishedEvent(applicationAttemptId,containerStatus));
// Poll until the attempt reaches FINISHED (bounded by maxWaitAttempts).
int count=0;
while (attempt.getState() != RMAppAttemptState.FINISHED && count < maxWaitAttempts) {
Thread.sleep(100);
count++;
}
Assert.assertTrue(attempt.getState() == RMAppAttemptState.FINISHED);
// A fresh connection with the expired token must be rejected.
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class);
try {
rmClient.allocate(allocateRequest);
Assert.fail("You got to be kidding me! " + "Using App tokens after app-finish should fail!");
}
catch ( Throwable t) {
LOG.info("Exception found is ",t);
// The secret manager must have purged the finished attempt's key.
Assert.assertTrue(t.getCause().getMessage().contains(applicationAttemptId.toString() + " not found in AMRMTokenSecretManager."));
}
}
finally {
rm.stop();
if (rmClient != null) {
rpc.stopProxy(rmClient,conf);
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// End-to-end check of client-to-AM token security: an unauthenticated
// client must be rejected by the AM's RPC server, tampered tokens must be
// rejected, and only the genuine token issued by the RM is accepted.
@Test public void testClientToAMTokens() throws Exception {
final Configuration conf=new Configuration();
// Kerberos auth is required for token-based authentication paths.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
ContainerManagementProtocol containerManager=mock(ContainerManagementProtocol.class);
StartContainersResponse mockResponse=mock(StartContainersResponse.class);
when(containerManager.startContainers((StartContainersRequest)any())).thenReturn(mockResponse);
// DrainDispatcher lets the test wait for all queued events to be handled.
final DrainDispatcher dispatcher=new DrainDispatcher();
MockRM rm=new MockRMWithCustomAMLauncher(conf,containerManager){
protected ClientRMService createClientRMService(){
return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,getRMContext().getRMDelegationTokenSecretManager());
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
@Override protected void doSecureLogin() throws IOException {
// No real Kerberos login in tests.
}
}
;
rm.start();
// Submit an app and heartbeat until the AM container is launched.
RMApp app=rm.submitApp(1024);
MockNM nm1=rm.registerNode("localhost:1234",3072);
nm1.nodeHeartbeat(true);
dispatcher.await();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttempt=app.getCurrentAppAttempt().getAppAttemptId();
final MockAM mockAM=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),app.getCurrentAppAttempt().getAppAttemptId());
// Register the AM as the attempt's own user to receive the master key.
UserGroupInformation appUgi=UserGroupInformation.createRemoteUser(appAttempt.toString());
RegisterApplicationMasterResponse response=appUgi.doAs(new PrivilegedAction(){
@Override public RegisterApplicationMasterResponse run(){
RegisterApplicationMasterResponse response=null;
try {
response=mockAM.registerAppAttempt();
}
catch ( Exception e) {
Assert.fail("Exception was not expected");
}
return response;
}
}
);
// Fetch the client-to-AM token the RM hands out in the app report.
GetApplicationReportRequest request=Records.newRecord(GetApplicationReportRequest.class);
request.setApplicationId(app.getApplicationId());
GetApplicationReportResponse reportResponse=rm.getClientRMService().getApplicationReport(request);
ApplicationReport appReport=reportResponse.getApplicationReport();
org.apache.hadoop.yarn.api.records.Token originalClientToAMToken=appReport.getClientToAMToken();
// Registration must have returned a non-empty master key for the AM.
Assert.assertNotNull(response.getClientToAMTokenMasterKey());
Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
ApplicationAttemptId appAttemptId=app.getAppAttempts().keySet().iterator().next();
Assert.assertNotNull(appAttemptId);
// Start a toy AM RPC service secured with that master key.
final CustomAM am=new CustomAM(appAttemptId,response.getClientToAMTokenMasterKey().array());
am.init(conf);
am.start();
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
// Without any token, the connection must be refused and the AM untouched.
try {
CustomProtocol client=(CustomProtocol)RPC.getProxy(CustomProtocol.class,1L,am.address,conf);
client.ping();
fail("Access by unauthenticated user should fail!!");
}
catch ( Exception e) {
Assert.assertFalse(am.pinged);
}
// Tampered tokens (bad ID, bad user) must fail; the real one must work.
Token token=ConverterUtils.convertFromYarn(originalClientToAMToken,am.address);
verifyTokenWithTamperedID(conf,am,token);
verifyTokenWithTamperedUserName(conf,am,token);
verifyValidToken(conf,am,token);
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that submitting an application whose credentials buffer does not
 * contain a valid token-storage header is rejected: the RM must throw a
 * YarnException mentioning "Bad header found in token storage".
 */
@Test(timeout=20000) public void testAppSubmissionWithInvalidDelegationToken() throws Exception {
Configuration conf=new Configuration();
// Enable kerberos auth so the RM actually parses the supplied token buffer.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
MockRM rm=new MockRM(conf);
// "BOGUS" is not a serialized Credentials object, so parsing must fail.
ByteBuffer tokens=ByteBuffer.wrap("BOGUS".getBytes());
ContainerLaunchContext amContainer=ContainerLaunchContext.newInstance(new HashMap(),new HashMap(),new ArrayList(),new HashMap(),tokens,new HashMap());
ApplicationSubmissionContext appSubContext=ApplicationSubmissionContext.newInstance(ApplicationId.newInstance(1234121,0),"BOGUS","default",Priority.UNDEFINED,amContainer,false,true,1,Resource.newInstance(1024,1),"BOGUS");
SubmitApplicationRequest request=SubmitApplicationRequest.newInstance(appSubContext);
try {
rm.getClientRMService().submitApplication(request);
// Fixed typo in the failure message: "excepted" -> "expected".
fail("Error was expected.");
}
catch ( YarnException e) {
Assert.assertTrue(e.getMessage().contains("Bad header found in token storage"));
}
}
UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=20000) public void testDTRonAppSubmission() throws IOException, InterruptedException, BrokenBarrierException {
final Credentials credsx=new Credentials();
final Token> tokenx=mock(Token.class);
credsx.addToken(new Text("token"),tokenx);
doReturn(true).when(tokenx).isManaged();
doThrow(new IOException("boom")).when(tokenx).renew(any(Configuration.class));
final DelegationTokenRenewer dtr=createNewDelegationTokenRenewer(conf,counter);
RMContext mockContext=mock(RMContext.class);
ClientRMService mockClientRMService=mock(ClientRMService.class);
when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
InetSocketAddress sockAddr=InetSocketAddress.createUnresolved("localhost",1234);
when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
dtr.setRMContext(mockContext);
when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
dtr.init(conf);
dtr.start();
try {
dtr.addApplicationSync(mock(ApplicationId.class),credsx,false);
fail("Catch IOException on app submission");
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains(tokenx.toString()));
Assert.assertTrue(e.getCause().toString().contains("boom"));
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Submits an application carrying an already-cancelled delegation token and
 * polls the RM event queue (up to 20 rounds, 500 ms apart) for an
 * APP_REJECTED event targeting that application.
 */
@Test(timeout=60000) public void testAppRejectionWithCancelledDelegationToken() throws Exception {
// Obtain a delegation token and cancel it before submission.
MyFS fileSys=(MyFS)FileSystem.get(conf);
LOG.info("dfs=" + (Object)fileSys.hashCode() + ";conf="+ conf.hashCode());
MyToken cancelledToken=fileSys.getDelegationToken("user1");
cancelledToken.cancelToken();
Credentials creds=new Credentials();
creds.addToken(cancelledToken.getKind(),cancelledToken);
ApplicationId applicationId=BuilderUtils.newApplicationId(0,0);
delegationTokenRenewer.addApplicationAsync(applicationId,creds,true);
// Poll for the rejection event; sleep between rounds while the queue is empty.
for (int remaining=20; remaining > 0; remaining--) {
if (eventQueue.isEmpty()) {
Thread.sleep(500);
continue;
}
Event event=eventQueue.take();
if (event.getType() == RMAppEventType.APP_REJECTED) {
Assert.assertTrue(((RMAppEvent)event).getApplicationId().equals(applicationId));
return;
}
}
fail("App submission with a cancelled token should have failed");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the /ws/v1/cluster/apps endpoint filters applications by the
 * repeated "states" query parameter: first a single ACCEPTED filter, then
 * ACCEPTED and KILLED supplied as two separate parameters.
 */
@Test public void testAppsQueryStates() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; a second is submitted and then killed.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
// Query 1: single states=ACCEPTED filter should return exactly one app.
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
// Query 2: two separate "states" parameters (ACCEPTED + KILLED) should
// return both apps.
r=resource();
params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
params.add("states",YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
// Result ordering is unspecified, so accept either order of the two states.
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises /ws/v1/cluster/appstatistics: unfiltered per-state counts, a
 * "states" filter, an "applicationTypes" filter, the at-most-one-type
 * restriction (400), a combined states+type filter, and an invalid state
 * value (400).
 */
@Test public void testAppStatistics() throws JSONException, Exception {
try {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",4096);
Thread.sleep(1);
// App 1 (MAPREDUCE) is driven to FINISHED via register/unregister; apps 2
// (MAPREDUCE) and 3 (OTHER) remain ACCEPTED.
RMApp app1=rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"OTHER");
// Query 1: no filters -> one statItem per YarnApplicationState value.
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("appstatistics").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject appsStatInfo=json.getJSONObject("appStatInfo");
assertEquals("incorrect number of elements",1,appsStatInfo.length());
JSONArray statItems=appsStatInfo.getJSONArray("statItem");
assertEquals("incorrect number of elements",YarnApplicationState.values().length,statItems.length());
// NOTE(review): the loop index i is never used; every iteration inspects
// statItems.getJSONObject(0). It looks like getJSONObject(i) was intended —
// confirm before changing, as only index 0 is actually asserted today.
for (int i=0; i < YarnApplicationState.values().length; ++i) {
assertEquals("*",statItems.getJSONObject(0).getString("type"));
if (statItems.getJSONObject(0).getString("state").equals("ACCEPTED")) {
assertEquals("2",statItems.getJSONObject(0).getString("count"));
}
else if (statItems.getJSONObject(0).getString("state").equals("FINISHED")) {
assertEquals("1",statItems.getJSONObject(0).getString("count"));
}
else {
assertEquals("0",statItems.getJSONObject(0).getString("count"));
}
}
// Query 2: states=ACCEPTED -> a single statItem with count 2.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states",YarnApplicationState.ACCEPTED.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
appsStatInfo=json.getJSONObject("appStatInfo");
assertEquals("incorrect number of elements",1,appsStatInfo.length());
statItems=appsStatInfo.getJSONArray("statItem");
assertEquals("incorrect number of elements",1,statItems.length());
assertEquals("ACCEPTED",statItems.getJSONObject(0).getString("state"));
assertEquals("*",statItems.getJSONObject(0).getString("type"));
assertEquals("2",statItems.getJSONObject(0).getString("count"));
// Query 3: applicationTypes=MAPREDUCE -> per-state counts for that type only.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
appsStatInfo=json.getJSONObject("appStatInfo");
assertEquals("incorrect number of elements",1,appsStatInfo.length());
statItems=appsStatInfo.getJSONArray("statItem");
assertEquals("incorrect number of elements",YarnApplicationState.values().length,statItems.length());
// NOTE(review): same unused-index pattern as above — getJSONObject(0) only.
for (int i=0; i < YarnApplicationState.values().length; ++i) {
assertEquals("mapreduce",statItems.getJSONObject(0).getString("type"));
if (statItems.getJSONObject(0).getString("state").equals("ACCEPTED")) {
assertEquals("1",statItems.getJSONObject(0).getString("count"));
}
else if (statItems.getJSONObject(0).getString("state").equals("FINISHED")) {
assertEquals("1",statItems.getJSONObject(0).getString("count"));
}
else {
assertEquals("0",statItems.getJSONObject(0).getString("count"));
}
}
// Query 4: two application types are rejected with a BadRequestException.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("applicationTypes","MAPREDUCE,OTHER").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject exception=json.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String className=exception.getString("javaClassName");
WebServicesTestUtils.checkStringContains("exception message","we temporarily support at most one applicationType",message);
WebServicesTestUtils.checkStringEqual("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringEqual("exception className","org.apache.hadoop.yarn.webapp.BadRequestException",className);
// Query 5: combined states=FINISHED,ACCEPTED + applicationTypes=MAPREDUCE
// -> exactly the two mapreduce statItems, one per requested state.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states",YarnApplicationState.FINISHED.toString() + "," + YarnApplicationState.ACCEPTED.toString()).queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
appsStatInfo=json.getJSONObject("appStatInfo");
assertEquals("incorrect number of elements",1,appsStatInfo.length());
statItems=appsStatInfo.getJSONArray("statItem");
assertEquals("incorrect number of elements",2,statItems.length());
JSONObject statItem1=statItems.getJSONObject(0);
JSONObject statItem2=statItems.getJSONObject(1);
// Ordering of the two statItems is unspecified; accept either order.
assertTrue((statItem1.getString("state").equals("ACCEPTED") && statItem2.getString("state").equals("FINISHED")) || (statItem2.getString("state").equals("ACCEPTED") && statItem1.getString("state").equals("FINISHED")));
assertEquals("mapreduce",statItem1.getString("type"));
assertEquals("1",statItem1.getString("count"));
assertEquals("mapreduce",statItem2.getString("type"));
assertEquals("1",statItem2.getString("count"));
// Query 6: an unknown state value is rejected with a BadRequestException.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states","wrong_state").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
exception=json.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
message=exception.getString("message");
type=exception.getString("exception");
className=exception.getString("javaClassName");
WebServicesTestUtils.checkStringContains("exception message","Invalid application-state wrong_state",message);
WebServicesTestUtils.checkStringEqual("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringEqual("exception className","org.apache.hadoop.yarn.webapp.BadRequestException",className);
}
finally {
rm.stop();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies /ws/v1/cluster/apps filtering by the "applicationTypes" query
 * parameter, including repeated parameters, comma-separated lists, empty
 * values (no filtering), and whitespace/empty-segment trimming.
 */
@Test public void testAppsQueryAppTypes() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
Thread.sleep(1);
// App 1 has the default type "YARN" and is driven to completion; the other
// two are MAPREDUCE and NON-YARN.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN");
// Query 1: single type MAPREDUCE -> one app.
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
// Query 2: two repeated parameters (YARN, MAPREDUCE) -> both apps, any order.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
// Query 3: comma-separated list "YARN,NON-YARN" -> both matching apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
// Query 4: empty value -> no type filtering, all 3 apps returned.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Query 5: list plus repeated parameter covering all 3 types -> all 3 apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Query 6: "YARN" plus an empty repeated parameter -> the empty value is
// ignored and only the YARN app matches.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Query 7: whitespace and empty segments around "YARN" are trimmed away.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, YARN ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Query 8: only empty/whitespace segments -> treated as no filter, 3 apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Query 9: mixed list with empty segments -> YARN and NON-YARN apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN, ,NON-YARN, ,,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
// Query 10: two messy repeated parameters -> YARN and MAPREDUCE apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes"," YARN, , ,,,").queryParam("applicationTypes","MAPREDUCE , ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Repeatedly fails the AM container until the configured maximum number of
 * AM attempts is exhausted, then verifies the application ends FAILED with
 * exactly maxAppAttempts recorded attempts and that the attempts REST helper
 * accepts the application.
 */
@Test(timeout=20000) public void testMultipleAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",8192);
RMApp submittedApp=rm.submitApp(CONTAINER_MB,"testwordcount","user1");
MockAM am=MockRM.launchAndRegisterAM(submittedApp,rm,amNodeManager);
int maxAppAttempts=rm.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
assertTrue(maxAppAttempts > 1);
for (int attempt=1;; attempt++) {
// Reporting the AM container as complete fails the current attempt.
amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FAILED);
if (attempt == maxAppAttempts) {
// Final allowed attempt: the whole application must fail.
rm.waitForState(submittedApp.getApplicationId(),RMAppState.FAILED);
break;
}
// Otherwise the app is rescheduled; launch and register the next attempt.
rm.waitForState(submittedApp.getApplicationId(),RMAppState.ACCEPTED);
am=MockRM.launchAndRegisterAM(submittedApp,rm,amNodeManager);
}
assertEquals("incorrect number of attempts",maxAppAttempts,submittedApp.getAppAttempts().values().size());
testAppAttemptsHelper(submittedApp.getApplicationId().toString(),submittedApp,MediaType.APPLICATION_JSON);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Same scenario as testAppsQueryStates, but the second query passes both
 * states as a single comma-separated "states" value instead of two repeated
 * parameters, verifying the server splits the list.
 */
@Test public void testAppsQueryStatesComma() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; a second is submitted and then killed.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
// Query 1: single states=ACCEPTED filter should return exactly one app.
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
// Query 2: comma-separated "ACCEPTED,KILLED" in one parameter -> both apps.
r=resource();
params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString() + "," + YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
// Result ordering is unspecified, so accept either order of the two states.
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * For every combination of request media type (JSON/XML) and entity content
 * type, submits an app and kills it through a PUT to
 * /ws/v1/cluster/apps/{appid}/state, checking the 202 -> Location header ->
 * eventual 200/KILLED transition. Without authentication the PUT must be
 * rejected with 401.
 */
@Test(timeout=90000) public void testSingleAppKill() throws Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE};
for ( String mediaType : mediaTypes) {
for ( MediaType contentType : contentTypes) {
RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
amNodeManager.nodeHeartbeat(true);
ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).get(ClientResponse.class);
// Build the KILLED target state in the entity format under test.
AppState targetState=new AppState(YarnApplicationState.KILLED.toString());
Object entity;
if (contentType == MediaType.APPLICATION_JSON_TYPE) {
entity=appStateToJSON(targetState);
}
else {
entity=targetState;
}
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class);
// Without auth the kill request must be rejected; skip the rest of this combo.
if (!isAuthenticationEnabled()) {
assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
continue;
}
// The kill is asynchronous: first response is 202 ACCEPTED with KILLING state.
assertEquals(Status.ACCEPTED,response.getClientResponseStatus());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
else {
verifyAppStateXML(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
// Follow the Location header with a plain client and verify it resolves.
String locationHeaderValue=response.getHeaders().getFirst(HttpHeaders.LOCATION);
Client c=Client.create();
WebResource tmp=c.resource(locationHeaderValue);
if (isAuthenticationEnabled()) {
tmp=tmp.queryParam("user.name",webserviceUserName);
}
response=tmp.get(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/" + app.getApplicationId().toString() + "/state"));
// Re-issue the PUT until the app is fully KILLED (200 instead of 202).
while (true) {
Thread.sleep(100);
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).entity(entity,contentType).put(ClientResponse.class);
assertTrue((response.getClientResponseStatus() == Status.ACCEPTED) || (response.getClientResponseStatus() == Status.OK));
if (response.getClientResponseStatus() == Status.OK) {
assertEquals(RMAppState.KILLED,app.getState());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLED);
}
else {
verifyAppStateXML(response,RMAppState.KILLED);
}
break;
}
}
}
}
rm.stop();
// NOTE(review): this trailing return in a void method is redundant.
return;
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the scheduler JSON exposes per-user entries (user1/user2 in leaf
 * queue b1) along with their active/pending application counts and resource
 * usage.
 */
@Test public void testPerUserResourcesJSON() throws Exception {
rm.start();
try {
// Two different users submit one app each into leaf queue b1.
rm.submitApp(10,"app1","user1",null,"b1");
rm.submitApp(20,"app2","user2",null,"b1");
ClientResponse schedulerResponse=resource().path("ws").path("v1").path("cluster").path("scheduler/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,schedulerResponse.getType());
JSONObject body=schedulerResponse.getEntity(JSONObject.class);
JSONObject schedulerInfo=body.getJSONObject("scheduler").getJSONObject("schedulerInfo");
JSONObject queueB1=getSubQueue(getSubQueue(schedulerInfo,"b"),"b1");
JSONArray userEntries=queueB1.getJSONObject("users").getJSONArray("user");
for (int idx=0; idx < 2; ++idx) {
JSONObject userEntry=userEntries.getJSONObject(idx);
assertTrue("User isn't user1 or user2",userEntry.getString("username").equals("user1") || userEntry.getString("username").equals("user2"));
// These reads throw JSONException if the fields are missing or non-numeric.
userEntry.getInt("numActiveApplications");
userEntry.getInt("numPendingApplications");
checkResourcesUsed(userEntry);
}
}
finally {
rm.stop();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test per user resources and resourcesUsed elements in the web services XML:
 * for queue b1 there must be exactly two user entries (user1/user2) with
 * numeric usage fields; every other queue's users element must be empty, and
 * every resourcesUsed element in the document must have numeric memory and
 * vCores children.
 * @throws Exception
 */
@Test public void testPerUserResourcesXML() throws Exception {
rm.start();
try {
// Two different users submit one app each into leaf queue b1.
rm.submitApp(10,"app1","user1",null,"b1");
rm.submitApp(20,"app2","user2",null,"b1");
WebResource r=resource();
ClientResponse response=r.path("ws/v1/cluster/scheduler").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
// Parse the XML payload into a DOM for structural assertions.
String xml=response.getEntity(String.class);
DocumentBuilder db=DocumentBuilderFactory.newInstance().newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList allUsers=dom.getElementsByTagName("users");
for (int i=0; i < allUsers.getLength(); ++i) {
Node perUserResources=allUsers.item(i);
// The queue name lives on the sibling queueName element of the parent queue node.
String queueName=getChildNodeByName(perUserResources.getParentNode(),"queueName").getTextContent();
if (queueName.equals("b1")) {
// Only b1 received apps, so only it should list (exactly two) users.
assertEquals(2,perUserResources.getChildNodes().getLength());
NodeList users=perUserResources.getChildNodes();
for (int j=0; j < users.getLength(); ++j) {
Node user=users.item(j);
String username=getChildNodeByName(user,"username").getTextContent();
assertTrue(username.equals("user1") || username.equals("user2"));
// parseInt throws NumberFormatException if any field is non-numeric.
Integer.parseInt(getChildNodeByName(getChildNodeByName(user,"resourcesUsed"),"memory").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numActiveApplications").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numPendingApplications").getTextContent());
}
}
else {
assertEquals(0,perUserResources.getChildNodes().getLength());
}
}
// Every resourcesUsed element anywhere in the document must be well-formed.
NodeList allResourcesUsed=dom.getElementsByTagName("resourcesUsed");
for (int i=0; i < allResourcesUsed.getLength(); ++i) {
Node resourcesUsed=allResourcesUsed.item(i);
Integer.parseInt(getChildNodeByName(resourcesUsed,"memory").getTextContent());
Integer.parseInt(getChildNodeByName(resourcesUsed,"vCores").getTextContent());
}
}
finally {
rm.stop();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies delegation-token authentication on the apps REST endpoint: an
 * unauthenticated POST is rejected with 401 UNAUTHORIZED, while the same
 * POST carrying the delegation-token header succeeds and the submitted
 * application is owned by the token's owner ("client").
 */
@Test public void testDelegationTokenAuth() throws Exception {
final String token=getDelegationToken("test");
ApplicationSubmissionContextInfo app=new ApplicationSubmissionContextInfo();
String appid="application_123_0";
app.setApplicationId(appid);
String requestBody=getMarshalledAppInfo(app);
URL url=new URL("http://localhost:8088/ws/v1/cluster/apps");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
setupConn(conn,"POST","application/xml",requestBody);
// Without the token header the submission must be rejected with 401.
try {
conn.getInputStream();
fail("we should not be here");
}
catch ( IOException e) {
assertEquals(Status.UNAUTHORIZED.getStatusCode(),conn.getResponseCode());
}
// Retry with the delegation-token header; this time submission must succeed.
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(DelegationTokenHeader,token);
setupConn(conn,"POST",MediaType.APPLICATION_XML,requestBody);
conn.getInputStream();
boolean appExists=rm.getRMContext().getRMApps().containsKey(ConverterUtils.toApplicationId(appid));
assertTrue(appExists);
RMApp actualApp=rm.getRMContext().getRMApps().get(ConverterUtils.toApplicationId(appid));
String owner=actualApp.getUser();
// The app must be owned by the delegation token's owner, not the RM user.
assertEquals("client",owner);
// Removed redundant trailing "return;" from this void method.
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the REST delegation-token renewal endpoint
 * (ws/v1/cluster/delegation-token/expiration) for every combination of
 * JSON/XML request and response media types. For each combination it checks:
 * the token owner may NOT renew, the designated renewer MAY renew (and the
 * expiration time strictly advances on each renewal), an unrelated third
 * party is FORBIDDEN, and a syntactically bad token yields BAD_REQUEST.
 * When Kerberos auth is disabled, only the simple-auth renew path is checked.
 */
@Test public void testRenewDelegationToken() throws Exception {
client().addFilter(new LoggingFilter(System.out));
rm.start();
final String renewer="client2";
// NOTE(review): this adds a second LoggingFilter identical to the one above —
// presumably redundant double logging; confirm before removing.
this.client().addFilter(new LoggingFilter(System.out));
// Token template naming "client2" as the only principal allowed to renew.
final DelegationToken dummyToken=new DelegationToken();
dummyToken.setRenewer(renewer);
String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
for ( final String mediaType : mediaTypes) {
for ( final String contentType : mediaTypes) {
// Without Kerberos only the simple-auth behavior applies; skip the rest.
if (isKerberosAuth == false) {
verifySimpleAuthRenew(mediaType,contentType);
continue;
}
// As the token owner ("client"): obtain a token, then verify the owner
// itself is forbidden from renewing it.
final DelegationToken responseToken=KerberosTestUtils.doAsClient(new Callable(){
@Override public DelegationToken call() throws Exception {
ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dummyToken,mediaType).post(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
DelegationToken tok=getDelegationTokenFromResponse(response);
assertFalse(tok.getToken().isEmpty());
String body=generateRenewTokenBody(mediaType,tok.getToken());
response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,tok.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
// Owner is not the renewer, so renewal must be rejected.
assertEquals(Status.FORBIDDEN,response.getClientResponseStatus());
return tok;
}
}
);
// As the designated renewer: two successive renewals must each push the
// expiration time strictly forward.
KerberosTestUtils.doAs(renewer,new Callable(){
@Override public DelegationToken call() throws Exception {
long oldExpirationTime=Time.now();
assertValidRMToken(responseToken.getToken());
String body=generateRenewTokenBody(mediaType,responseToken.getToken());
ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
DelegationToken tok=getDelegationTokenFromResponse(response);
String message="Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime();
assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime);
oldExpirationTime=tok.getNextExpirationTime();
// Sleep so the second renewal lands at a measurably later wall-clock time.
Thread.sleep(1000);
response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
tok=getDelegationTokenFromResponse(response);
message="Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime();
assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime);
return tok;
}
}
);
// As an unrelated user ("client3"): renewal must be forbidden.
KerberosTestUtils.doAs("client3",new Callable(){
@Override public DelegationToken call() throws Exception {
String body=generateRenewTokenBody(mediaType,responseToken.getToken());
ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
assertEquals(Status.FORBIDDEN,response.getClientResponseStatus());
return null;
}
}
);
// Malformed token body (and no token header) must produce BAD_REQUEST.
KerberosTestUtils.doAsClient(new Callable(){
@Override public Void call() throws Exception {
String token="TEST_TOKEN_STRING";
String body="";
if (mediaType.equals(MediaType.APPLICATION_JSON)) {
body="{\"token\": \"" + token + "\" }";
}
else {
body="" + token + " ";
}
ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").accept(contentType).entity(body,mediaType).post(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
return null;
}
}
);
}
}
rm.stop();
return;
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies leveldb timeline-store schema version handling: a minor-version
 * bump is tolerated across restart (and reset to the current version), while
 * a major-version bump makes the restart fail with ServiceStateException.
 */
@Test public void testCheckVersion() throws IOException {
  LeveldbTimelineStore leveldbStore=(LeveldbTimelineStore)store;
  // Freshly started store persists exactly its current version.
  Version current=leveldbStore.getCurrentVersion();
  Assert.assertEquals(current,leveldbStore.loadVersion());
  // Compatible change: same major, higher minor.
  Version minorBump=Version.newInstance(current.getMajorVersion(),current.getMinorVersion() + 2);
  leveldbStore.storeVersion(minorBump);
  Assert.assertEquals(minorBump,leveldbStore.loadVersion());
  restartTimelineStore();
  leveldbStore=(LeveldbTimelineStore)store;
  // After restart the stored version is rewritten back to the current one.
  Assert.assertEquals(current,leveldbStore.loadVersion());
  // Incompatible change: higher major version must abort startup.
  Version majorBump=Version.newInstance(current.getMajorVersion() + 1,current.getMinorVersion());
  leveldbStore.storeVersion(majorBump);
  try {
    restartTimelineStore();
    Assert.fail("Incompatible version, should expect fail here.");
  }
  catch ( ServiceStateException e) {
    Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for timeline store"));
  }
}
UtilityVerifier BooleanVerifier HybridVerifier
/**
 * With ACLs enabled, an entity that lacks its owner system filter is
 * considered corrupted: checkAccess must throw a YarnException saying so,
 * even for the admin user.
 */
@Test public void testCorruptedOwnerInfo() throws Exception {
  Configuration config=new YarnConfiguration();
  config.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true);
  config.set(YarnConfiguration.YARN_ADMIN_ACL,"owner");
  TimelineACLsManager aclsManager=new TimelineACLsManager(config);
  // Deliberately do NOT set the ENTITY_OWNER filter on this entity.
  TimelineEntity ownerlessEntity=new TimelineEntity();
  try {
    aclsManager.checkAccess(UserGroupInformation.createRemoteUser("owner"),ownerlessEntity);
    Assert.fail("Exception is expected");
  }
  catch ( YarnException e) {
    Assert.assertTrue("It's not the exact expected exception",e.getMessage().contains("is corrupted."));
  }
}
InternalCallVerifier BooleanVerifier
/**
 * When ACL checking is switched off, checkAccess grants access to any user,
 * regardless of the entity's owner.
 */
@Test public void testYarnACLsNotEnabled() throws Exception {
  Configuration config=new YarnConfiguration();
  config.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,false);
  TimelineACLsManager aclsManager=new TimelineACLsManager(config);
  TimelineEntity ownedEntity=new TimelineEntity();
  ownedEntity.addPrimaryFilter(TimelineStore.SystemFilter.ENTITY_OWNER.toString(),"owner");
  // "user" is neither owner nor admin, yet must be allowed through.
  Assert.assertTrue("Always true when ACLs are not enabled",aclsManager.checkAccess(UserGroupInformation.createRemoteUser("user"),ownedEntity));
}
InternalCallVerifier BooleanVerifier
/**
 * With ACLs enabled, only the entity owner and the configured admin may
 * access an entity; any other user is denied.
 */
@Test public void testYarnACLsEnabled() throws Exception {
  Configuration config=new YarnConfiguration();
  config.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true);
  config.set(YarnConfiguration.YARN_ADMIN_ACL,"admin");
  TimelineACLsManager aclsManager=new TimelineACLsManager(config);
  TimelineEntity ownedEntity=new TimelineEntity();
  ownedEntity.addPrimaryFilter(TimelineStore.SystemFilter.ENTITY_OWNER.toString(),"owner");
  Assert.assertTrue("Owner should be allowed to access",aclsManager.checkAccess(UserGroupInformation.createRemoteUser("owner"),ownedEntity));
  Assert.assertFalse("Other shouldn't be allowed to access",aclsManager.checkAccess(UserGroupInformation.createRemoteUser("other"),ownedEntity));
  Assert.assertTrue("Admin should be allowed to access",aclsManager.checkAccess(UserGroupInformation.createRemoteUser("admin"),ownedEntity));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a single entity (with one event) through the timeline client:
 * the put must succeed with no errors, go over HTTPS, and the entity must be
 * readable back from the store with matching id and type.
 */
@Test public void testPutEntities() throws Exception {
  TestTimelineClient timelineClient=new TestTimelineClient();
  try {
    timelineClient.init(conf);
    timelineClient.start();
    // Entity under test: one event attached.
    TimelineEntity entity=new TimelineEntity();
    entity.setEntityType("test entity type");
    entity.setEntityId("test entity id");
    TimelineEvent event=new TimelineEvent();
    event.setEventType("test event type");
    event.setTimestamp(0L);
    entity.addEvent(event);
    TimelinePutResponse putResponse=timelineClient.putEntities(entity);
    Assert.assertEquals(0,putResponse.getErrors().size());
    // The recorded response URL must show the secure scheme was used.
    Assert.assertTrue(timelineClient.resp.toString().contains("https"));
    // Read it back and compare the identifying fields.
    TimelineEntity stored=store.getEntity(entity.getEntityId(),entity.getEntityType(),EnumSet.allOf(Field.class));
    Assert.assertNotNull(stored);
    Assert.assertEquals(entity.getEntityId(),stored.getEntityId());
    Assert.assertEquals(entity.getEntityType(),stored.getEntityType());
  }
  finally {
    timelineClient.stop();
    timelineClient.close();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies that LeveldbIterator translates RuntimeExceptions thrown by the
 * wrapped DBIterator into DBException for every method in RTEXC_METHODS
 * (without double-wrapping an existing DBException), and that close()
 * surfaces failures as IOException instead.
 */
@Test public void testExceptionHandling() throws Exception {
  // Dynamic proxy standing in for a DBIterator whose every call blows up.
  InvocationHandler rtExcHandler=new InvocationHandler(){
    @Override public Object invoke( Object proxy, Method method, Object[] args) throws Throwable {
      throw new RuntimeException("forced runtime error");
    }
  }
  ;
  DBIterator dbiter=(DBIterator)Proxy.newProxyInstance(DBIterator.class.getClassLoader(),new Class[]{DBIterator.class},rtExcHandler);
  LeveldbIterator iter=new LeveldbIterator(dbiter);
  for ( CallInfo ci : RTEXC_METHODS) {
    Method method=iter.getClass().getMethod(ci.methodName,ci.argTypes);
    assertNotNull("unable to locate method " + ci.methodName,method);
    try {
      method.invoke(iter,ci.args);
      fail("operation should have thrown");
    }
    catch ( InvocationTargetException ite) {
      // Reflection wraps the real failure; unwrap it before checking types.
      Throwable exc=ite.getTargetException();
      assertTrue("Method " + ci.methodName + " threw non-DBException: "+ exc,exc instanceof DBException);
      assertFalse("Method " + ci.methodName + " double-wrapped DBException",exc.getCause() instanceof DBException);
    }
  }
  // close() is special-cased: failures come back as IOException.
  try {
    iter.close();
    // Fixed typo in the failure message ("shoul" -> "should").
    fail("operation should have thrown");
  }
  catch ( IOException e) {
    // expected: close() maps the underlying failure to IOException
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * End-to-end test of the web-app proxy servlet over a real embedded proxy
 * server. Drives a sequence of HTTP requests and checks status codes and the
 * "checked_application_..." approval cookie for each scenario selected via
 * appReportFetcher.answer.
 * NOTE(review): the meaning of each answer value (1,2,3,4) is defined in
 * AppReportFetcherForTest, not visible here — comments below describe only
 * the observed assertions.
 */
@Test(timeout=5000) public void testWebAppProxyServlet() throws Exception {
Configuration configuration=new Configuration();
configuration.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9090");
configuration.setInt("hadoop.http.max.threads",5);
WebAppProxyServerForTest proxy=new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort=proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
AppReportFetcherForTest appReportFetcher=proxy.proxy.appReportFetcher;
try {
// An unparseable application id yields HTTP 500.
URL wrongUrl=new URL("http://localhost:" + proxyPort + "/proxy/app");
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode());
// Valid app id with the approval cookie already set: proxied OK and the
// cookie is echoed back.
URL url=new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
assertTrue(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=1: request is rejected with 404 and no approval cookie.
appReportFetcher.answer=1;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=4: likewise 404 without the cookie.
appReportFetcher.answer=4;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=2, no cookie sent: the interstitial warning page is served.
appReportFetcher.answer=2;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
String s=readInputStream(proxyConn.getInputStream());
assertTrue(s.contains("to continue to an Application Master web interface owned by"));
assertTrue(s.contains("WARNING: The following page may not be safe!"));
// answer=3 with the cookie: proxied straight through with 200.
appReportFetcher.answer=3;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
}
finally {
// Always tear down the embedded proxy server.
proxy.close();
}
}
BooleanVerifier
/**
 * The AM IP filter must tolerate a request whose getCookies() returns null
 * (no Cookie header at all) and still pass the request down the chain when
 * the remote address is the proxy host.
 */
@Test(timeout=5000) @SuppressWarnings("deprecation") public void filterNullCookies() throws Exception {
  // Request from the proxy host carrying no cookies whatsoever.
  HttpServletRequest cookielessRequest=Mockito.mock(HttpServletRequest.class);
  Mockito.when(cookielessRequest.getCookies()).thenReturn(null);
  Mockito.when(cookielessRequest.getRemoteAddr()).thenReturn(proxyHost);
  HttpServletResponse mockResponse=Mockito.mock(HttpServletResponse.class);
  // Chain that merely records whether it was reached.
  final AtomicBoolean chainReached=new AtomicBoolean();
  FilterChain recordingChain=new FilterChain(){
    @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
      chainReached.set(true);
    }
  }
  ;
  Map initParams=new HashMap();
  initParams.put(AmIpFilter.PROXY_HOST,proxyHost);
  initParams.put(AmIpFilter.PROXY_URI_BASE,proxyUri);
  FilterConfig filterConfig=new DummyFilterConfig(initParams);
  Filter filter=new TestAmIpFilter();
  filter.init(filterConfig);
  filter.doFilter(cookielessRequest,mockResponse,recordingChain);
  assertTrue(chainReached.get());
  filter.destroy();
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test AmIpFilter: non-HTTP requests are rejected, unknown remote addresses
 * are redirected to the proxy, known addresses pass through, and a proxy
 * user cookie wraps the request so the user principal is exposed.
 */
@Test(timeout=1000) @SuppressWarnings("deprecation") public void testFilter() throws Exception {
Map params=new HashMap();
params.put(AmIpFilter.PROXY_HOST,proxyHost);
params.put(AmIpFilter.PROXY_URI_BASE,proxyUri);
FilterConfig config=new DummyFilterConfig(params);
// Chain that records the concrete request class (and captures the wrapper
// when the filter substitutes one) into the test's outer fields.
FilterChain chain=new FilterChain(){
@Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
doFilterRequest=servletRequest.getClass().getName();
if (servletRequest instanceof AmIpServletRequestWrapper) {
servletWrapper=(AmIpServletRequestWrapper)servletRequest;
}
}
}
;
AmIpFilter testFilter=new AmIpFilter();
testFilter.init(config);
HttpServletResponseForTest response=new HttpServletResponseForTest();
// A plain (non-HTTP) ServletRequest must be refused outright.
ServletRequest failRequest=Mockito.mock(ServletRequest.class);
try {
testFilter.doFilter(failRequest,response,chain);
fail();
}
catch ( ServletException e) {
assertEquals("This filter only works for HTTP/HTTPS",e.getMessage());
}
// Request from an address that is not the proxy: redirected to proxyUri.
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("redirect");
Mockito.when(request.getRequestURI()).thenReturn("/redirect");
testFilter.doFilter(request,response,chain);
assertEquals("http://bogus/redirect",response.getRedirect());
// Request from the proxy address without a user cookie: passed through
// as a plain HttpServletRequest.
Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
testFilter.doFilter(request,response,chain);
assertTrue(doFilterRequest.contains("javax.servlet.http.HttpServletRequest"));
// With the proxy user cookie set, the filter wraps the request and the
// wrapper exposes the cookie's user as principal / remote user.
Cookie[] cookies=new Cookie[1];
cookies[0]=new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME,"user");
Mockito.when(request.getCookies()).thenReturn(cookies);
testFilter.doFilter(request,response,chain);
assertEquals("org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper",doFilterRequest);
assertEquals("user",servletWrapper.getUserPrincipal().getName());
assertEquals("user",servletWrapper.getRemoteUser());
assertFalse(servletWrapper.isUserInRole(""));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Lifecycle test for NMSimulator: registration is reflected in scheduler
 * metrics, a positive-lifetime container runs, lifetime -1 marks an AM
 * container, and cleanup moves/removes containers appropriately.
 */
@Test public void testNMSimulator() throws Exception {
  // Register a simulated NM (10 GB, 10 vcores) and heartbeat once.
  NMSimulator nm=new NMSimulator();
  nm.init("rack1/node1",GB * 10,10,0,1000,rm);
  nm.middleStep();
  Assert.assertEquals(1,rm.getResourceScheduler().getNumClusterNodes());
  Assert.assertEquals(GB * 10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
  Assert.assertEquals(10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores());
  // Positive lifetime -> tracked as a running container.
  ContainerId normalId=newContainerId(1,1,1);
  Container normalContainer=Container.newInstance(normalId,null,null,Resources.createResource(GB,1),null,null);
  nm.addNewContainer(normalContainer,100000l);
  Assert.assertTrue("Node1 should have one running container.",nm.getRunningContainers().containsKey(normalId));
  // Lifetime -1 -> tracked as an AM container.
  ContainerId amId=newContainerId(2,1,1);
  Container amContainer=Container.newInstance(amId,null,null,Resources.createResource(GB,1),null,null);
  nm.addNewContainer(amContainer,-1l);
  Assert.assertTrue("Node1 should have one running AM container",nm.getAMContainers().contains(amId));
  // Cleanup: normal container moves to completed, AM container is dropped.
  nm.cleanupContainer(normalId);
  Assert.assertTrue("Container1 should be removed from Node1.",nm.getCompletedContainers().contains(normalId));
  nm.cleanupContainer(amId);
  Assert.assertFalse("Container2 should be removed from Node1.",nm.getAMContainers().contains(amId));
}
BooleanVerifier
/**
 * A DualTask (two firings over the window) must run both its first and
 * last steps once scheduled on a started runner.
 */
@Test public void testDualTask() throws Exception {
  runner.start();
  DualTask task=new DualTask(0,10,10);
  runner.schedule(task);
  // Wait (bounded) for the task to signal completion of both steps.
  DualTask.latch.await(5000,TimeUnit.MILLISECONDS);
  Assert.assertTrue(DualTask.first);
  Assert.assertTrue(DualTask.last);
}
BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A MultiTask over [0,20] with period 5 fires first, last, and the expected
 * number of middle steps: (end-start)/period - 2 endpoints + 1.
 */
@Test public void testMultiTask() throws Exception {
  runner.start();
  MultiTask task=new MultiTask(0,20,5);
  runner.schedule(task);
  // Bounded wait for all firings to complete.
  MultiTask.latch.await(5000,TimeUnit.MILLISECONDS);
  Assert.assertTrue(MultiTask.first);
  Assert.assertEquals((20 - 0) / 5 - 2 + 1,MultiTask.middle);
  Assert.assertTrue(MultiTask.last);
}
BooleanVerifier
/**
 * A one-shot SingleTask scheduled at time 0 must fire its (only) first step.
 */
@Test public void testSingleTask() throws Exception {
  runner.start();
  SingleTask task=new SingleTask(0);
  runner.schedule(task);
  // Bounded wait for the single firing.
  SingleTask.latch.await(5000,TimeUnit.MILLISECONDS);
  Assert.assertTrue(SingleTask.first);
}
BooleanVerifier
/**
 * A TriTask over [0,10] with period 5 must execute its first, middle,
 * and last steps exactly once each.
 */
@Test public void testTriTask() throws Exception {
  runner.start();
  TriTask task=new TriTask(0,10,5);
  runner.schedule(task);
  // Bounded wait for all three firings.
  TriTask.latch.await(5000,TimeUnit.MILLISECONDS);
  Assert.assertTrue(TriTask.first);
  Assert.assertTrue(TriTask.middle);
  Assert.assertTrue(TriTask.last);
}
BooleanVerifier
/**
 * A task scheduled BEFORE the runner starts must be queued, not dropped,
 * and its delay must be measured from runner start — so it fires no earlier
 * than ~200ms after start() even though we already slept past its delay.
 */
@Test public void testPreStartQueueing() throws Exception {
  runner.schedule(new PreStartTask(210));
  // Sleep past the task's nominal delay while the runner is still stopped.
  Thread.sleep(210);
  runner.start();
  long startMillis=System.currentTimeMillis();
  PreStartTask.latch.await(5000,TimeUnit.MILLISECONDS);
  long firedMillis=System.currentTimeMillis();
  Assert.assertTrue(PreStartTask.first);
  // The delay counts from start(), hence at least ~200ms elapsed.
  Assert.assertTrue(firedMillis - startMillis >= 200);
}
APIUtilityVerifier BooleanVerifier
/**
 * Renders the queue/app tracking page template with sample tracked queues
 * and apps and checks the result is non-empty.
 * NOTE(review): the per-queue/per-app fragments appended below are empty
 * strings — the HTML snippets appear to have been stripped from this source;
 * confirm against the original template-population code.
 */
@Test public void testTrackPageHtmlTemplate() throws Exception {
  String trackTemplate=FileUtils.readFileToString(new File("src/main/html/track.html.template"));
  Set trackedQueues=new HashSet();
  trackedQueues.add("sls_queue_1");
  trackedQueues.add("sls_queue_2");
  trackedQueues.add("sls_queue_3");
  String trackedQueueInfo="";
  for ( Object queue : trackedQueues) {
    trackedQueueInfo+="";
  }
  Set trackedApps=new HashSet();
  trackedApps.add("app_1");
  trackedApps.add("app_2");
  String trackedAppInfo="";
  for ( Object job : trackedApps) {
    trackedAppInfo+="";
  }
  String trackInfo=MessageFormat.format(trackTemplate,trackedQueueInfo,trackedAppInfo,"s",1000,1000);
  Assert.assertTrue("The queue/app tracking html page should not be empty",trackInfo.length() > 0);
}
APIUtilityVerifier BooleanVerifier
/**
 * Renders the simulate page template with per-queue legend entries for the
 * allocated memory and vcores charts, and checks the result is non-empty.
 */
@Test public void testSimulatePageHtmlTemplate() throws Exception {
  String simulateTemplate=FileUtils.readFileToString(new File("src/main/html/simulate.html.template"));
  Set queues=new HashSet();
  queues.add("sls_queue_1");
  queues.add("sls_queue_2");
  queues.add("sls_queue_3");
  // Build the JS legend assignments for chart rows 4 (memory) and 5 (vcores).
  StringBuilder legendBuilder=new StringBuilder();
  int idx=0;
  for ( Object queue : queues) {
    legendBuilder.append("legends[4][" + idx + "] = 'queue"+ queue+ ".allocated.memory'");
    legendBuilder.append("legends[5][" + idx + "] = 'queue"+ queue+ ".allocated.vcores'");
    idx++;
  }
  String simulateInfo=MessageFormat.format(simulateTemplate,legendBuilder.toString(),"s",1000,1000);
  Assert.assertTrue("The simulate page html page should not be empty",simulateInfo.length() > 0);
}
APIUtilityVerifier BooleanVerifier
/**
 * Populates SLSRunner.simulateInfoMap with sample metrics, renders the
 * simulate-info page template, and verifies every key/value pair appears
 * in the output.
 * NOTE(review): the key "Average tasks per applicaion" intentionally keeps
 * the spelling used by the production code that writes this map; do not
 * "fix" it here without fixing the producer.
 */
@Test public void testSimulateInfoPageHtmlTemplate() throws Exception {
  String simulateInfoTemplate=FileUtils.readFileToString(new File("src/main/html/simulate.info.html.template"));
  SLSRunner.simulateInfoMap.put("Number of racks",10);
  SLSRunner.simulateInfoMap.put("Number of nodes",100);
  SLSRunner.simulateInfoMap.put("Node memory (MB)",1024);
  SLSRunner.simulateInfoMap.put("Node VCores",1);
  SLSRunner.simulateInfoMap.put("Number of applications",100);
  SLSRunner.simulateInfoMap.put("Number of tasks",1000);
  SLSRunner.simulateInfoMap.put("Average tasks per applicaion",10);
  SLSRunner.simulateInfoMap.put("Number of queues",4);
  SLSRunner.simulateInfoMap.put("Average applications per queue",25);
  SLSRunner.simulateInfoMap.put("Estimated simulate time (s)",10000);
  // Render each map entry into the table fragment fed to the template.
  StringBuilder tableRows=new StringBuilder();
  for ( Object e : SLSRunner.simulateInfoMap.entrySet()) {
    Map.Entry entry=(Map.Entry)e;
    tableRows.append("");
    tableRows.append("" + entry.getKey() + " ");
    tableRows.append("" + entry.getValue() + " ");
    tableRows.append(" ");
  }
  String simulateInfo=MessageFormat.format(simulateInfoTemplate,tableRows.toString());
  Assert.assertTrue("The simulate info html page should not be empty",simulateInfo.length() > 0);
  // Every entry must have made it into the rendered page.
  for ( Object e : SLSRunner.simulateInfoMap.entrySet()) {
    Map.Entry entry=(Map.Entry)e;
    Assert.assertTrue("The simulate info html page should have information " + "of " + entry.getKey(),simulateInfo.contains("" + entry.getKey() + " "+ entry.getValue()+ " "));
  }
}
BooleanVerifier
/**
 * Localizes five jar resources (alternating PRIVATE/APPLICATION visibility)
 * via FSDownload on a single-threaded executor and verifies each localized
 * path is a directory with permissions appropriate to its visibility.
 */
@Test(timeout=10000) public void testDirDownload() throws IOException, InterruptedException {
Configuration conf=new Configuration();
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
// Log the seed so a failing run can be reproduced.
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
for (int i=0; i < 5; ++i) {
// Alternate visibility: even -> PRIVATE, odd -> APPLICATION.
LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis=LocalResourceVisibility.APPLICATION;
}
Path p=new Path(basedir,"dir" + i + ".jar");
LocalResource rsrc=createJar(files,p,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),conf);
// Unique suffix so concurrent tests never collide on a destination.
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
}
// Wait for all downloads to finish before inspecting results.
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
for ( Future path : pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for ( Map.Entry> p : pending.entrySet()) {
Path localized=p.getValue().get();
FileStatus status=files.getFileStatus(localized);
System.out.println("Testing path " + localized);
// Each jar must have been unpacked into a directory.
assert (status.isDirectory());
assert (rsrcVis.containsKey(p.getKey()));
// Permissions must match the resource's declared visibility, recursively.
verifyPermsRecursively(localized.getFileSystem(conf),files,localized,rsrcVis.get(p.getKey()));
}
}
catch ( ExecutionException e) {
throw new IOException("Failed exec",e);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * A resource declared PUBLIC but created under a restrictive umask (077)
 * is not world-readable, so localization must fail: the FSDownload future
 * completes exceptionally with an IOException cause.
 */
@Test(timeout=10000) public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
// Restrictive umask ensures the "public" file is not actually readable by all.
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
// Log the seed so a failing run can be reproduced.
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int size=512;
LocalResourceVisibility vis=LocalResourceVisibility.PUBLIC;
Path path=new Path(basedir,"test-file");
LocalResource rsrc=createFile(files,path,size,rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),size,conf);
// Unique suffix so concurrent tests never collide on a destination.
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
// Wait for the download attempt to complete.
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
Assert.assertTrue(pending.get(rsrc).isDone());
try {
for ( Map.Entry> p : pending.entrySet()) {
p.getValue().get();
Assert.fail("We localized a file that is not public.");
}
}
catch ( ExecutionException e) {
// The localization failure must surface as an IOException cause.
Assert.assertTrue(e.getCause() instanceof IOException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The localized resource must land directly under the unique destination
 * path handed to FSDownload (i.e. the result's parent is that destination).
 */
@Test(timeout=10000) public void testUniqueDestinationPath() throws Exception {
  Configuration conf=new Configuration();
  FileContext files=FileContext.getLocalFSFileContext(conf);
  final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir,null,true);
  conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
  LocalDirAllocator dirAllocator=new LocalDirAllocator(TestFSDownload.class.getName());
  // Destination made unique via the shared counter.
  Path destPath=dirAllocator.getLocalPathForWrite(basedir.toString(),conf);
  destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
  Path jarPath=new Path(basedir,"dir" + 0 + ".jar");
  LocalResource jarResource=createJar(files,jarPath,LocalResourceVisibility.PRIVATE);
  FSDownload download=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,jarResource);
  ExecutorService executor=Executors.newSingleThreadExecutor();
  Future localizedPath=executor.submit(download);
  // Wait for the download to finish.
  executor.shutdown();
  while (!executor.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
  Assert.assertTrue(localizedPath.isDone());
  Assert.assertEquals(destPath,localizedPath.get().getParent());
}
BooleanVerifier AssumptionSetter IdentityVerifier HybridVerifier
/**
 * Verifies that FSDownload.isPublic consults the shared stat cache: with
 * three files checked concurrently, each distinct path's status must be
 * loaded exactly once, and every isPublic check must return true.
 */
@Test(timeout=60000) public void testDownloadPublicWithStatCache() throws IOException, URISyntaxException, InterruptedException, ExecutionException {
final Configuration conf=new Configuration();
FileContext files=FileContext.getLocalFSFileContext(conf);
Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
// Skip the test when ancestor dirs aren't world-executable (e.g. restrictive
// home dirs), since isPublic would legitimately return false there.
FileSystem f=basedir.getFileSystem(conf);
assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f,basedir,null));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
int size=512;
// Per-path counter of how many times the cache loader was invoked.
final ConcurrentMap counts=new ConcurrentHashMap();
final CacheLoader> loader=FSDownload.createStatusCacheLoader(conf);
// Wrap the real loader so each load is counted before delegating.
final LoadingCache> statCache=CacheBuilder.newBuilder().build(new CacheLoader>(){
public Future load( Path path) throws Exception {
AtomicInteger count=counts.get(path);
if (count == null) {
count=new AtomicInteger(0);
AtomicInteger existing=counts.putIfAbsent(path,count);
if (existing != null) {
count=existing;
}
}
count.incrementAndGet();
return loader.load(path);
}
}
);
final int fileCount=3;
List> tasks=new ArrayList>();
for (int i=0; i < fileCount; i++) {
// Log the seed so a failing run can be reproduced.
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
final Path path=new Path(basedir,"test-file-" + i);
createFile(files,path,size,rand);
final FileSystem fs=path.getFileSystem(conf);
final FileStatus sStat=fs.getFileStatus(path);
tasks.add(new Callable(){
public Boolean call() throws IOException {
return FSDownload.isPublic(fs,path,sStat,statCache);
}
}
);
}
// One thread per file so the isPublic checks genuinely race on the cache.
ExecutorService exec=Executors.newFixedThreadPool(fileCount);
try {
List> futures=exec.invokeAll(tasks);
for ( Future future : futures) {
assertTrue(future.get());
}
for ( AtomicInteger count : counts.values()) {
// Was assertSame(count.get(),1): identity comparison of autoboxed
// Integers (only worked via the -128..127 Integer cache) with the
// arguments reversed. assertEquals with expected first is correct.
assertEquals(1,count.get());
}
}
finally {
exec.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Localizes ten randomly-sized files (alternating PRIVATE/APPLICATION
 * visibility) and verifies each localized file's size bookkeeping, the
 * 0755 cache-directory permission, and the private-file permission bits.
 */
@Test(timeout=10000) public void testDownload() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
// Log the seed so a failing run can be reproduced.
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int[] sizes=new int[10];
for (int i=0; i < 10; ++i) {
// Random size in [512, 1023]; file is named by its index.
sizes[i]=rand.nextInt(512) + 512;
LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis=LocalResourceVisibility.APPLICATION;
}
Path p=new Path(basedir,"" + i);
LocalResource rsrc=createFile(files,p,sizes[i],rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),sizes[i],conf);
// Unique suffix so concurrent tests never collide on a destination.
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
}
// Wait for all downloads to finish before inspecting results.
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
for ( Future path : pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for ( Map.Entry> p : pending.entrySet()) {
Path localized=p.getValue().get();
// The localized file's name is its index; size must match the request.
assertEquals(sizes[Integer.valueOf(localized.getName())],p.getKey().getSize());
FileStatus status=files.getFileStatus(localized.getParent());
FsPermission perm=status.getPermission();
assertEquals("Cache directory permissions are incorrect",new FsPermission((short)0755),perm);
status=files.getFileStatus(localized);
perm=status.getPermission();
System.out.println("File permission " + perm + " for rsrc vis "+ p.getKey().getVisibility().name());
assert (rsrcVis.containsKey(p.getKey()));
Assert.assertTrue("Private file should be 500",perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort());
}
}
catch ( ExecutionException e) {
throw new IOException("Failed exec",e);
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * End-to-end test of ProcfsBasedProcessTree against a real process tree:
 * spawns a self-recursing shell script (N levels deep), verifies the tree
 * dump contains every level, destroys the tree, and checks that after death
 * the tree reports zero cumulative vmem and an empty process list.
 * Linux-only (requires /proc).
 */
@Test(timeout=30000) public void testProcessTree() throws Exception {
try {
// Hard requirement: procfs-based tracking must be available on Linux.
Assert.assertTrue(ProcfsBasedProcessTree.isAvailable());
}
catch ( Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",false);
return;
}
Random rm=new Random();
// Temp artifacts: the rogue shell script, its pid file, and the file the
// deepest descendant writes its own pid into.
File tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_shellScript_" + rm.nextInt()+ ".sh");
tempFile.deleteOnExit();
shellScript=TEST_ROOT_DIR + File.separator + tempFile.getName();
tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_pidFile_" + rm.nextInt()+ ".pid");
tempFile.deleteOnExit();
pidFile=TEST_ROOT_DIR + File.separator + tempFile.getName();
lowestDescendant=TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
try {
// Script recurses $1 times, each level spawning a child shell; the
// deepest level records its pid and sleeps forever.
FileWriter fWriter=new FileWriter(shellScript);
fWriter.write("# rogue task\n" + "sleep 1\n" + "echo hello\n"+ "if [ $1 -ne 0 ]\n"+ "then\n"+ " sh " + shellScript + " $(($1-1))\n"+ "else\n"+ " echo $$ > "+ lowestDescendant+ "\n"+ " while true\n do\n"+ " sleep 5\n"+ " done\n"+ "fi");
fWriter.close();
}
catch ( IOException ioe) {
LOG.info("Error: " + ioe);
return;
}
// Launch the rogue tree in a background thread and locate its root pid.
Thread t=new RogueTaskThread();
t.start();
String pid=getRogueTaskPID();
LOG.info("Root process pid: " + pid);
ProcfsBasedProcessTree p=createProcessTree(pid);
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Poll until the deepest descendant has written its pid file, i.e. the
// whole tree is fully spawned.
File leaf=new File(lowestDescendant);
while (!leaf.exists()) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ie) {
break;
}
}
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Capture the dump BEFORE killing the tree, then destroy it.
String processTreeDump=p.getProcessTreeDump();
destroyProcessTree(pid);
// Poll (up to ~10s) for the tree to actually die; the check used depends
// on whether setsid grouped the processes.
boolean isAlive=true;
for (int tries=100; tries > 0; tries--) {
if (isSetsidAvailable()) {
isAlive=isAnyProcessInTreeAlive(p);
}
else {
isAlive=isAlive(pid);
}
if (!isAlive) {
break;
}
Thread.sleep(100);
}
if (isAlive) {
fail("ProcessTree shouldn't be alive");
}
LOG.info("Process-tree dump follows: \n" + processTreeDump);
Assert.assertTrue("Process-tree dump doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
// Every recursion level 0..N must appear as its own "sh <script> <i>" line.
for (int i=N; i >= 0; i--) {
String cmdLineDump="\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" + " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " "+ i;
Pattern pat=Pattern.compile(cmdLineDump);
Matcher mat=pat.matcher(processTreeDump);
Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i + "th process!",mat.find());
}
try {
t.join(2000);
LOG.info("RogueTaskThread successfully joined.");
}
catch ( InterruptedException ie) {
LOG.info("Interrupted while joining RogueTaskThread.");
}
// After death, a refresh must show an empty tree with zero vmem.
p.updateProcessTree();
Assert.assertFalse("ProcessTree must have been gone",isAlive(pid));
Assert.assertTrue("Cumulative vmem for the gone-process is " + p.getCumulativeVmem() + " . It should be zero.",p.getCumulativeVmem() == 0);
Assert.assertTrue(p.toString().equals("[ ]"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test the correctness of process-tree dump.
 * @throws IOException if the fake procfs directories or files cannot be set up
 */
@Test(timeout=30000) public void testProcessTreeDump() throws IOException {
  String[] pids = {"100", "200", "300", "400", "500", "600"};
  File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
  try {
    setupProcfsRootDir(procfsRootDir);
    setupPidDirs(procfsRootDir, pids);
    int numProcesses = pids.length;
    // Fake stat entries: {pid, name, ppid, pgrpid, session, vmem, rss, utime, stime}.
    // Processes 100..500 form one tree rooted at 100; 600 hangs off init (ppid 1)
    // and must therefore NOT appear in the dump of tree "100".
    String[][] statEntries = {
      {"100", "proc1", "1",   "100", "100", "100000", "100", "1000", "200"},
      {"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"},
      {"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"},
      {"400", "proc4", "200", "100", "100", "400000", "400", "4000", "800"},
      {"500", "proc5", "400", "100", "100", "400000", "400", "4000", "800"},
      {"600", "proc6", "1",   "1",   "1",   "400000", "400", "4000", "800"}};
    ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
    ProcessTreeSmapMemInfo[] memInfos = new ProcessTreeSmapMemInfo[numProcesses];
    for (int i = 0; i < numProcesses; i++) {
      procInfos[i] = new ProcessStatInfo(statEntries[i]);
      memInfos[i] = new ProcessTreeSmapMemInfo(pids[i]);
    }
    String[] cmdLines = {
      "proc1 arg1 arg2", "proc2 arg3 arg4", "proc3 arg5 arg6",
      "proc4 arg7 arg8", "proc5 arg9 arg10", "proc6 arg11 arg12"};
    createMemoryMappingInfo(memInfos);
    writeStatFiles(procfsRootDir, pids, procInfos, memInfos);
    writeCmdLineFiles(procfsRootDir, pids, cmdLines);
    ProcfsBasedProcessTree processTree =
        createProcessTree("100", procfsRootDir.getAbsolutePath());
    processTree.updateProcessTree();
    String processTreeDump = processTree.getProcessTreeDump();
    LOG.info("Process-tree dump follows: \n" + processTreeDump);
    Assert.assertTrue("Process-tree dump doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
    // Every process inside the tree (indices 0..4) must show up in the dump.
    for (int i = 0; i < 5; i++) {
      ProcessStatInfo info = procInfos[i];
      String expectedLine = "\t|- " + info.pid + " " + info.ppid + " "
          + info.pgrpId + " " + info.session + " (" + info.name + ") "
          + info.utime + " " + info.stime + " " + info.vmem + " "
          + info.rssmemPage + " " + cmdLines[i];
      Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of process " + info.pid,
          processTreeDump.contains(expectedLine));
    }
    // Process 600 is outside the tree and must be absent from the dump.
    ProcessStatInfo outsider = procInfos[5];
    String outsiderLine = "\t|- " + outsider.pid + " " + outsider.ppid + " "
        + outsider.pgrpId + " " + outsider.session + " (" + outsider.name
        + ") " + outsider.utime + " " + outsider.stime + " " + outsider.vmem
        + " " + cmdLines[5];
    Assert.assertFalse("Process-tree dump shouldn't contain the cmdLineDump of process " + outsider.pid,
        processTreeDump.contains(outsiderLine));
  }
  finally {
    FileUtil.fullyDelete(procfsRootDir);
  }
}
BooleanVerifier
/**
 * Verifies ProcfsBasedProcessTree.checkPidPgrpidForMatch() in case of
 * 'constructProcessInfo() returning null' by not writing stat file for the
 * mock process.
 * @throws IOException if there was a problem setting up the fake procfs
 * directories or files.
 */
@Test(timeout=30000) public void testDestroyProcessTree() throws IOException {
  String rootPid = "100";
  File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
  try {
    setupProcfsRootDir(procfsRootDir);
    createProcessTree(rootPid, procfsRootDir.getAbsolutePath());
    // No stat file was written for the pid, so constructProcessInfo()
    // returns null; the pid/pgrpid check must still report a match.
    Assert.assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(
        rootPid, procfsRootDir.getAbsolutePath()));
  }
  finally {
    FileUtil.fullyDelete(procfsRootDir);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises WindowsBasedProcessTree aggregation across successive snapshots:
 * cumulative vmem/rss reflect the current process set, the age-filtered
 * variants (olderThanAge) only count processes observed in earlier snapshots,
 * and CPU time accumulates across refreshes. Windows-only; skipped elsewhere.
 */
@Test(timeout=30000) public void tree(){
  if (!Shell.WINDOWS) {
    LOG.info("Platform not Windows. Not testing");
    return;
  }
  assertTrue("WindowsBasedProcessTree should be available on Windows",WindowsBasedProcessTree.isAvailable());
  WindowsBasedProcessTreeTester pTree=new WindowsBasedProcessTreeTester("-1");
  // Snapshot 1: two processes, 1024 vmem/rss each, 500ms CPU each.
  pTree.infoStr="3524,1024,1024,500\r\n2844,1024,1024,500\r\n";
  pTree.updateProcessTree();
  // assertEquals (not assertTrue(==)) so failures report actual values.
  assertEquals(2048, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(0));
  assertEquals(2048, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(0));
  assertEquals(1000, pTree.getCumulativeCpuTime());
  // Snapshot 2: a third process appears; age-1 totals exclude it.
  pTree.infoStr="3524,1024,1024,1000\r\n2844,1024,1024,1000\r\n1234,1024,1024,1000\r\n";
  pTree.updateProcessTree();
  assertEquals(3072, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(1));
  assertEquals(3072, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(1));
  assertEquals(3000, pTree.getCumulativeCpuTime());
  // Snapshot 3: the third process exits; CPU time keeps accumulating.
  pTree.infoStr="3524,1024,1024,1500\r\n2844,1024,1024,1500\r\n";
  pTree.updateProcessTree();
  assertEquals(2048, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(2));
  assertEquals(2048, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(2));
  assertEquals(4000, pTree.getCumulativeCpuTime());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies the Windows resource calculator's refresh throttling: a second
 * read inside the refresh interval returns cached values (and cpuUsage stays
 * at the -1 "not yet computed" sentinel); after sleeping past the interval a
 * read picks up the new snapshot and yields a positive CPU usage.
 * @throws InterruptedException if the sleep is interrupted
 */
@Test(timeout=20000) public void refreshAndCpuUsage() throws InterruptedException {
  WindowsResourceCalculatorPluginTester tester=new WindowsResourceCalculatorPluginTester();
  tester.infoStr="17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
  tester.getAvailablePhysicalMemorySize();
  // assertEquals (not assertTrue(==)) so failures report actual values.
  assertEquals(6400417792L, tester.memAvailable);
  assertEquals(-1, tester.cpuUsage, 0.0);
  // New data within the refresh interval: cached values must be returned.
  tester.infoStr="17177038848,8589467648,15232745472,5400417792,1,2805000,6261812\r\n";
  tester.getAvailablePhysicalMemorySize();
  assertEquals(6400417792L, tester.memAvailable);
  assertEquals(-1, tester.cpuUsage, 0.0);
  // Let the refresh interval elapse so the next read re-parses infoStr.
  Thread.sleep(1500);
  tester.infoStr="17177038848,8589467648,15232745472,5400417792,1,2805000,6286812\r\n";
  tester.getAvailablePhysicalMemorySize();
  assertEquals(5400417792L, tester.memAvailable);
  assertTrue(tester.cpuUsage >= 0.1);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that a winutils systeminfo line (vmem size, mem size, vmem
 * available, mem available, processor count, CPU frequency, cumulative CPU
 * time) is parsed into the corresponding plugin fields, with cpuUsage left
 * at the -1 "not yet computed" sentinel after a single sample.
 */
@Test(timeout=30000) public void parseSystemInfoString(){
  WindowsResourceCalculatorPluginTester tester=new WindowsResourceCalculatorPluginTester();
  tester.infoStr="17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
  tester.getAvailablePhysicalMemorySize();
  // assertEquals (not assertTrue(==)) so failures report actual values.
  assertEquals(17177038848L, tester.vmemSize);
  assertEquals(8589467648L, tester.memSize);
  assertEquals(15232745472L, tester.vmemAvailable);
  assertEquals(6400417792L, tester.memAvailable);
  assertEquals(1, tester.numProcessors);
  assertEquals(2805000L, tester.cpuFrequencyKhz);
  assertEquals(6261812L, tester.cumulativeCpuTimeMs);
  assertEquals(-1, tester.cpuUsage, 0.0);
}
BooleanVerifier NullVerifier HybridVerifier
/**
* Test the yarn version info routines.
* @throws IOException
*/
@Test public void versionInfoGenerated() throws IOException {
assertTrue("getVersion returned Unknown",!YarnVersionInfo.getVersion().equals("Unknown"));
assertTrue("getUser returned Unknown",!YarnVersionInfo.getUser().equals("Unknown"));
assertTrue("getSrcChecksum returned Unknown",!YarnVersionInfo.getSrcChecksum().equals("Unknown"));
assertNotNull("getUrl returned null",YarnVersionInfo.getUrl());
assertNotNull("getRevision returned null",YarnVersionInfo.getRevision());
assertNotNull("getBranch returned null",YarnVersionInfo.getBranch());
assertTrue("getBuildVersion check doesn't contain: source checksum",YarnVersionInfo.getBuildVersion().contains("source checksum"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCreateWithPort(){
WebApp app=WebApps.$for(this).at(0).start();
int port=app.getListenerAddress().getPort();
assertTrue(port > 0);
app.stop();
app=WebApps.$for(this).at(port).start();
assertEquals(port,app.getListenerAddress().getPort());
app.stop();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Wires up a web app with explicit controller routes plus a "ws" REST
 * service path, then checks that each URL pattern dispatches to the
 * expected controller/action and that unknown paths yield 404.
 * @throws Exception on any HTTP or startup failure
 */
@Test public void testCustomRoutes() throws Exception {
  WebApp app=WebApps.$for("test",TestWebApp.class,this,"ws").start(new WebApp(){
    @Override public void setup(){
      bind(MyTestJAXBContextResolver.class);
      bind(MyTestWebService.class);
      route("/:foo",FooController.class);
      route("/bar/foo",FooController.class,"bar");
      route("/foo/:foo",DefaultController.class);
      route("/foo/bar/:foo",DefaultController.class,"index");
    }
  });
  String base = baseUrl(app);
  try {
    // Default route and the /:foo pattern both hit FooController.index.
    assertEquals("foo", getContent(base).trim());
    assertEquals("foo", getContent(base + "test").trim());
    assertEquals("foo1", getContent(base + "test/1").trim());
    // Explicit action routing.
    assertEquals("bar", getContent(base + "test/bar/foo").trim());
    assertEquals("default", getContent(base + "test/foo/bar").trim());
    assertEquals("default1", getContent(base + "test/foo/1").trim());
    assertEquals("default2", getContent(base + "test/foo/bar/2").trim());
    // Unmatched path -> 404; REST service path -> 200 with expected payload.
    assertEquals(404, getResponseCode(base + "test/goo"));
    assertEquals(200, getResponseCode(base + "ws/v1/test"));
    assertTrue(getContent(base + "ws/v1/test").contains("myInfo"));
  }
  finally {
    app.stop();
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A web app started with an empty name must redirect to "/" and serve
 * exactly the catch-all "/*" path spec.
 */
@Test public void testServePathsNoName(){
  WebApp webApp = WebApps.$for("", this).start();
  assertEquals("/", webApp.getRedirectPath());
  String[] expected = {"/*"};
  String[] specs = webApp.getServePathSpecs();
  assertEquals(1, specs.length);
  for (String path : expected) {
    assertTrue(ArrayUtils.contains(specs, path));
  }
  webApp.stop();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With hadoop.log.dir pointing at a non-existent directory, /static must not
 * serve controller output, /logs must 404, and the root route must still
 * dispatch to FooController. The original system property value is restored
 * in finally so this test no longer leaks global state into later tests.
 * @throws Exception on any HTTP or startup failure
 */
@Test public void testYARNWebAppContext() throws Exception {
  String oldLogDir=System.getProperty("hadoop.log.dir");
  System.setProperty("hadoop.log.dir","/Not/Existing/dir");
  WebApp app=WebApps.$for("test",this).start(new WebApp(){
    @Override public void setup(){
      route("/",FooController.class);
    }
  });
  String baseUrl=baseUrl(app);
  try {
    assertFalse("foo".equals(getContent(baseUrl + "static").trim()));
    assertEquals(404,getResponseCode(baseUrl + "logs"));
    assertEquals("foo",getContent(baseUrl).trim());
  }
  finally {
    app.stop();
    // Restore the property so other tests see the pre-test value.
    if (oldLogDir == null) {
      System.clearProperty("hadoop.log.dir");
    } else {
      System.setProperty("hadoop.log.dir",oldLogDir);
    }
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A web app named "test" must redirect to "/test" and serve exactly the
 * two path specs "/test" and "/test/*".
 */
@Test public void testServePaths(){
  WebApp webApp = WebApps.$for("test", this).start();
  assertEquals("/test", webApp.getRedirectPath());
  String[] expected = {"/test", "/test/*"};
  String[] specs = webApp.getServePathSpecs();
  assertEquals(2, specs.length);
  for (String path : expected) {
    assertTrue(ArrayUtils.contains(specs, path));
  }
  webApp.stop();
}
BooleanVerifier
@Test(timeout=60000L) public void testJavaScriptInfoBlock() throws Exception {
WebAppTests.testBlock(JavaScriptInfoBlock.class);
TestInfoBlock.pw.flush();
String output=TestInfoBlock.sw.toString();
assertFalse(output.contains("